diff --git a/tests/common/fixtures/duthost_utils.py b/tests/common/fixtures/duthost_utils.py index 89d48ca4f72..5af7fa02ffc 100644 --- a/tests/common/fixtures/duthost_utils.py +++ b/tests/common/fixtures/duthost_utils.py @@ -441,16 +441,21 @@ def dut_qos_maps(rand_selected_dut): """ maps = {} try: + if rand_selected_dut.is_multi_asic: + sonic_cfggen_cmd = "sonic-cfggen -n asic0 -d --var-json" + else: + sonic_cfggen_cmd = "sonic-cfggen -d --var-json" + # port_qos_map - maps['port_qos_map'] = json.loads(rand_selected_dut.shell("sonic-cfggen -d --var-json 'PORT_QOS_MAP'")['stdout']) + maps['port_qos_map'] = json.loads(rand_selected_dut.shell("{} 'PORT_QOS_MAP'".format(sonic_cfggen_cmd))['stdout']) # dscp_to_tc_map - maps['dscp_to_tc_map'] = json.loads(rand_selected_dut.shell("sonic-cfggen -d --var-json 'DSCP_TO_TC_MAP'")['stdout']) + maps['dscp_to_tc_map'] = json.loads(rand_selected_dut.shell("{} 'DSCP_TO_TC_MAP'".format(sonic_cfggen_cmd))['stdout']) # tc_to_queue_map - maps['tc_to_queue_map'] = json.loads(rand_selected_dut.shell("sonic-cfggen -d --var-json 'TC_TO_QUEUE_MAP'")['stdout']) + maps['tc_to_queue_map'] = json.loads(rand_selected_dut.shell("{} 'TC_TO_QUEUE_MAP'".format(sonic_cfggen_cmd))['stdout']) # tc_to_priority_group_map - maps['tc_to_priority_group_map'] = json.loads(rand_selected_dut.shell("sonic-cfggen -d --var-json 'TC_TO_PRIORITY_GROUP_MAP'")['stdout']) + maps['tc_to_priority_group_map'] = json.loads(rand_selected_dut.shell("{} 'TC_TO_PRIORITY_GROUP_MAP'".format(sonic_cfggen_cmd))['stdout']) # tc_to_dscp_map - maps['tc_to_dscp_map'] = json.loads(rand_selected_dut.shell("sonic-cfggen -d --var-json 'TC_TO_DSCP_MAP'")['stdout']) + maps['tc_to_dscp_map'] = json.loads(rand_selected_dut.shell("{} 'TC_TO_DSCP_MAP'".format(sonic_cfggen_cmd))['stdout']) except: pass return maps diff --git a/tests/common/fixtures/ptfhost_utils.py b/tests/common/fixtures/ptfhost_utils.py index e6d8d2527d3..952e830be95 100644 --- a/tests/common/fixtures/ptfhost_utils.py +++ 
b/tests/common/fixtures/ptfhost_utils.py @@ -179,7 +179,7 @@ def _ptf_portmap_file(duthost, ptfhost, tbinfo): filename (str): returns the filename copied to PTF host """ intfInfo = duthost.show_interface(command = "status")['ansible_facts']['int_status'] - portList = [port for port in intfInfo if port.startswith('Ethernet') and intfInfo[port]['oper_state'] == 'up'] + portList = [port for port in intfInfo if port.startswith('Ethernet') and intfInfo[port]['oper_state'] == 'up' and intfInfo[port]['admin_state'] == 'up'] mg_facts = duthost.get_extended_minigraph_facts(tbinfo) portMapFile = "/tmp/default_interface_to_front_map.ini" with open(portMapFile, 'w') as file: @@ -520,11 +520,16 @@ def ptf_test_port_map_active_active(ptfhost, tbinfo, duthosts, mux_server_url, d if target_dut_port in mg_facts['minigraph_port_indices'].values(): router_mac = duts_running_config_facts[duthosts[target_dut_index].hostname][idx]['DEVICE_METADATA']['localhost']['mac'].lower() asic_idx = idx + for a_dut_port, a_dut_port_index in mg_facts['minigraph_port_indices'].items(): + if a_dut_port_index == target_dut_port and "Ethernet-Rec" not in a_dut_port and \ + "Ethernet-IB" not in a_dut_port and "Ethernet-BP" not in a_dut_port: + dut_port = a_dut_port break ports_map[ptf_port] = { 'target_dut': [target_dut_index], 'target_dest_mac': router_mac, 'target_src_mac': [router_mac], + 'dut_port': dut_port, 'asic_idx': asic_idx } diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 5c6da13ad8f..07938f016fd 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -658,6 +658,12 @@ qos/test_buffer_traditional.py: conditions: - "is_multi_asic==True or release not in ['201911']" +qos/test_buffer.py: + skip: + reason: "Tests are relevant to dynamic buffering not required on T2" + conditions: + - "'t2' in 
topo_name" + qos/test_pfc_pause.py::test_pfc_pause_lossless: # For this test, we use the fanout connected to the DUT to send PFC pause frames. # The fanout needs to send PFC frames fast enough so that the queue remains completely paused for the entire duration @@ -723,10 +729,20 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize: qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolWatermark: skip: + reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX" + conditions: + - "platform in ['x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-arista_7800r3_48cq2_lc', 'x86_64-arista_7800r3_48cqm2_lc', 'x86_64-arista_7800r3a_36d2_lc']" + xfail: reason: "Headroom pool size not supported." conditions: - "hwsku not in ['Arista-7060CX-32S-C32', 'Celestica-DX010-C32', 'Arista-7260CX3-D108C8', 'Force10-S6100', 'Arista-7260CX3-Q64', 'Arista-7050CX3-32S-C32', 'Arista-7050CX3-32S-D48C8']" +qos/test_qos_sai.py::TestQosSai::testQosSaiBufferPoolWatermark: + skip: + reason: "sai_thrift_read_buffer_pool_watermark are not supported on DNX" + conditions: + - "platform in ['x86_64-nokia_ixr7250e_36x400g-r0', 'x86_64-arista_7800r3_48cq2_lc', 'x86_64-arista_7800r3_48cqm2_lc', 'x86_64-arista_7800r3a_36d2_lc']" + qos/test_qos_sai.py::TestQosSai::testQosSaiPGDrop: skip: reason: "PG drop size test is not supported." 
diff --git a/tests/conftest.py b/tests/conftest.py index 0cfa8f0ba8e..0d8b6576ab9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1633,7 +1633,7 @@ def duts_running_config_facts(duthosts): @pytest.fixture(scope='class') def dut_test_params(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, - ptf_portmap_file, lower_tor_host): # noqa F811 + ptf_portmap_file, lower_tor_host, creds): # noqa F811 """ Prepares DUT host test params @@ -1653,7 +1653,7 @@ def dut_test_params(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, mgFacts = duthost.get_extended_minigraph_facts(tbinfo) topo = tbinfo["topo"]["name"] - yield { + rtn_dict = { "topo": topo, "hwsku": mgFacts["minigraph_hwsku"], "basicParams": { @@ -1663,10 +1663,15 @@ def dut_test_params(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, ).vars['ansible_host'], "port_map_file": ptf_portmap_file, "sonic_asic_type": duthost.facts['asic_type'], - "sonic_version": duthost.os_version + "sonic_version": duthost.os_version, + "dut_username": creds['sonicadmin_user'], + "dut_password": creds['sonicadmin_password'] } } + if 'platform_asic' in duthost.facts: + rtn_dict['basicParams']["platform_asic"] = duthost.facts['platform_asic'] + yield rtn_dict @pytest.fixture(scope='module') def duts_minigraph_facts(duthosts, tbinfo): diff --git a/tests/qos/files/qos.yml b/tests/qos/files/qos.yml index 7d5a1e1ec71..2e60564490b 100644 --- a/tests/qos/files/qos.yml +++ b/tests/qos/files/qos.yml @@ -3196,21 +3196,22 @@ qos_params: j2c+: topo-any: 100000_300m: - pkts_num_leak_out: 0 + pkts_num_leak_out: 51 + internal_hdr_size: 48 xoff_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 - pkts_num_margin: 0 + pkts_num_trig_pfc: 9874 + pkts_num_trig_ingr_drp: 10861 + pkts_num_margin: 100 xoff_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 - pkts_num_margin: 0 + pkts_num_trig_pfc: 9874 + pkts_num_trig_ingr_drp: 10861 + pkts_num_margin: 
100 hdrm_pool_size: dscps: [3, 4] ecn: 1 @@ -3218,56 +3219,61 @@ qos_params: src_port_ids: [0, 2, 4, 6, 8, 10, 12, 14, 16] dst_port_id: 18 pgs_num: 18 - pkts_num_trig_pfc: 392440 + pkts_num_trig_pfc: 9974 pkts_num_hdrm_full: 362 pkts_num_hdrm_partial: 182 wm_pg_headroom: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_pfc: 9874 + pkts_num_trig_ingr_drp: 10861 cell_size: 4096 - pkts_num_margin: 0 + pkts_num_margin: 30 xon_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_dismiss_pfc: 0 + pkts_num_trig_pfc: 9874 + pkts_num_dismiss_pfc: 200 + pkts_num_margin: 150 xon_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 392440 - pkts_num_dismiss_pfc: 0 + pkts_num_trig_pfc: 9874 + pkts_num_dismiss_pfc: 200 + pkts_num_margin: 150 lossy_queue_1: dscp: 8 ecn: 1 pg: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 + pkts_num_margin: 20 wm_pg_shared_lossless: dscp: 3 ecn: 1 pg: 3 - pkts_num_fill_min: 0 - pkts_num_trig_pfc: 392440 + pkts_num_fill_min: 51 + pkts_num_trig_pfc: 9874 packet_size: 64 cell_size: 4096 + pkts_num_margin: 40 wm_pg_shared_lossy: dscp: 8 ecn: 1 pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_fill_min: 51 + pkts_num_trig_egr_drp: 2396745 packet_size: 64 cell_size: 4096 + pkts_num_margin: 40 wm_q_shared_lossless: dscp: 3 ecn: 1 queue: 3 pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_ingr_drp: 10861 cell_size: 4096 wm_buf_pool_lossless: dscp: 3 @@ -3275,8 +3281,8 @@ qos_params: pg: 3 queue: 3 pkts_num_fill_ingr_min: 0 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_pfc: 9974 + pkts_num_trig_ingr_drp: 10861 pkts_num_fill_egr_min: 8 cell_size: 4096 wm_q_shared_lossy: @@ -3284,7 +3290,7 @@ qos_params: ecn: 1 queue: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 cell_size: 4096 wm_buf_pool_lossy: dscp: 8 @@ -3292,78 +3298,26 @@ qos_params: pg: 0 queue: 0 
pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 pkts_num_fill_egr_min: 0 cell_size: 4096 - ecn_1: - dscp: 8 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 4096 - ecn_2: - dscp: 8 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 4096 - ecn_3: - dscp: 0 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 4096 - ecn_4: - dscp: 0 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 4096 - wrr: - ecn: 1 - q0_num_of_pkts: 140 - q1_num_of_pkts: 140 - q2_num_of_pkts: 140 - q3_num_of_pkts: 150 - q4_num_of_pkts: 150 - q5_num_of_pkts: 140 - q6_num_of_pkts: 140 - limit: 80 - wrr_chg: - ecn: 1 - q0_num_of_pkts: 80 - q1_num_of_pkts: 80 - q2_num_of_pkts: 80 - q3_num_of_pkts: 300 - q4_num_of_pkts: 300 - q5_num_of_pkts: 80 - q6_num_of_pkts: 80 - limit: 80 - lossy_weight: 8 - lossless_weight: 30 - hdrm_pool_wm_multiplier: 1 - cell_size: 4096 - 400000_300m: - pkts_num_leak_out: 0 + pkts_num_leak_out: 71 + internal_hdr_size: 48 xoff_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 - pkts_num_margin: 0 + pkts_num_trig_pfc: 28160 + pkts_num_trig_ingr_drp: 28187 + pkts_num_margin: 100 xoff_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 - pkts_num_margin: 0 + pkts_num_trig_pfc: 28160 + pkts_num_trig_ingr_drp: 28187 + pkts_num_margin: 100 hdrm_pool_size: dscps: [3, 4] ecn: 1 @@ -3371,56 +3325,61 @@ qos_params: src_port_ids: [0, 2, 4, 6, 8, 10, 12, 14, 16] dst_port_id: 18 pgs_num: 18 - pkts_num_trig_pfc: 392440 + pkts_num_trig_pfc: 28260 pkts_num_hdrm_full: 362 pkts_num_hdrm_partial: 182 wm_pg_headroom: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_pfc: 28160 + pkts_num_trig_ingr_drp: 28187 cell_size: 4096 - pkts_num_margin: 0 + pkts_num_margin: 30 xon_1: dscp: 3 ecn: 1 pg: 3 - pkts_num_trig_pfc: 392440 - pkts_num_dismiss_pfc: 
0 + pkts_num_trig_pfc: 28160 + pkts_num_dismiss_pfc: 200 + pkts_num_margin: 150 xon_2: dscp: 4 ecn: 1 pg: 4 - pkts_num_trig_pfc: 392440 - pkts_num_dismiss_pfc: 0 + pkts_num_trig_pfc: 28160 + pkts_num_dismiss_pfc: 200 + pkts_num_margin: 150 lossy_queue_1: dscp: 8 ecn: 1 pg: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 + pkts_num_margin: 3500 wm_pg_shared_lossless: dscp: 3 ecn: 1 pg: 3 - pkts_num_fill_min: 0 - pkts_num_trig_pfc: 392440 + pkts_num_fill_min: 71 + pkts_num_trig_pfc: 28160 packet_size: 64 cell_size: 4096 + pkts_num_margin: 40 wm_pg_shared_lossy: dscp: 8 ecn: 1 pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_fill_min: 71 + pkts_num_trig_egr_drp: 2396745 packet_size: 64 cell_size: 4096 + pkts_num_margin: 40 wm_q_shared_lossless: dscp: 3 ecn: 1 queue: 3 pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_ingr_drp: 28187 cell_size: 4096 wm_buf_pool_lossless: dscp: 3 @@ -3428,8 +3387,8 @@ qos_params: pg: 3 queue: 3 pkts_num_fill_ingr_min: 0 - pkts_num_trig_pfc: 392440 - pkts_num_trig_ingr_drp: 392467 + pkts_num_trig_pfc: 28160 + pkts_num_trig_ingr_drp: 28187 pkts_num_fill_egr_min: 8 cell_size: 4096 wm_q_shared_lossy: @@ -3437,7 +3396,7 @@ qos_params: ecn: 1 queue: 0 pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 cell_size: 4096 wm_buf_pool_lossy: dscp: 8 @@ -3445,7 +3404,7 @@ qos_params: pg: 0 queue: 0 pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 21845 + pkts_num_trig_egr_drp: 2396745 pkts_num_fill_egr_min: 0 cell_size: 4096 ecn_1: @@ -3495,7 +3454,7 @@ qos_params: q4_num_of_pkts: 300 q5_num_of_pkts: 80 q6_num_of_pkts: 80 - limit: 80 + limit: 150 lossy_weight: 8 lossless_weight: 30 hdrm_pool_wm_multiplier: 1 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index bc3b9f2888e..d5dd16e9ba2 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -19,6 +19,7 @@ from tests.common.fixtures.duthost_utils import dut_qos_maps, 
separated_dscp_to_tc_map_on_uplink # noqa F401 from tests.common.utilities import wait_until from tests.ptf_runner import ptf_runner +from tests.common.system_utils import docker logger = logging.getLogger(__name__) @@ -58,7 +59,8 @@ def isBufferInApplDb(self, dut_asic): return self.buffer_model @pytest.fixture(scope='class', autouse=True) - def dutTestParams(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, dut_test_params, tbinfo): + def dutTestParams(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, + dut_test_params, tbinfo): """ Prepares DUT host test params Returns: @@ -68,6 +70,7 @@ def dutTestParams(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, dut if dut_test_params["topo"] in self.SUPPORTED_T0_TOPOS: dut_test_params["basicParams"]["router_mac"] = '' + dut_test_params["basicParams"]["asic_id"] = enum_frontend_asic_index # For dualtor qos test scenario, DMAC of test traffic is default vlan interface's MAC address. # To reduce duplicated code, put "is_dualtor" and "def_vlan_mac" into dutTestParams['basicParams']. if "dualtor" in tbinfo["topo"]["name"]: @@ -494,6 +497,7 @@ def dutConfig( duthost = lower_tor_host else: duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + dut_asic = duthost.asic_instance(enum_frontend_asic_index) dut_asic = duthost.asic_instance(enum_frontend_asic_index) dutLagInterfaces = [] @@ -607,7 +611,7 @@ def dutConfig( if "." 
in iface: iface, vlan_id = iface.split(".") portIndex = mgFacts["minigraph_ptf_indices"][iface] - portIpMap = {'peer_addr': addr["peer_ipv4"]} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface } if vlan_id is not None: portIpMap['vlan_id'] = vlan_id dutPortIps.update({portIndex: portIpMap}) @@ -616,7 +620,7 @@ def dutConfig( iter(mgFacts["minigraph_portchannels"][iface]["members"]) ) portIndex = mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"]} + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName } dutPortIps.update({portIndex: portIpMap}) testPortIds = sorted(dutPortIps.keys()) @@ -676,9 +680,15 @@ def dutConfig( "downlink_port_names": downlinkPortNames }) dutinterfaces = {} - for port, index in mgFacts["minigraph_ptf_indices"].items(): - if 'Ethernet-Rec' not in port and 'Ethernet-IB' not in port: - dutinterfaces[index] = port + + if tbinfo["topo"]["type"] == "t2": + for ptf_port, ptf_val in dutPortIps.items(): + dutinterfaces[ptf_port] = ptf_val['port'] + else: + for port, index in mgFacts["minigraph_ptf_indices"].items(): + if 'Ethernet-Rec' not in port and 'Ethernet-IB' not in port: + dutinterfaces[index] = port + yield { "dutInterfaces": dutinterfaces, "testPortIds": testPortIds, @@ -999,34 +1009,37 @@ def dutQosConfig( qosParams = qpm.run() elif 'broadcom' in duthost.facts['asic_type'].lower(): - bufferConfig = self.dutBufferConfig(duthost) - pytest_assert(len(bufferConfig) == 4, "buffer config is incompleted") - pytest_assert('BUFFER_POOL' in bufferConfig, 'BUFFER_POOL is not exist in bufferConfig') - pytest_assert('BUFFER_PROFILE' in bufferConfig, 'BUFFER_PROFILE is not exist in bufferConfig') - pytest_assert('BUFFER_QUEUE' in bufferConfig, 'BUFFER_QUEUE is not exist in bufferConfig') - pytest_assert('BUFFER_PG' in bufferConfig, 'BUFFER_PG is not exist in bufferConfig') - - current_file_dir = os.path.dirname(os.path.realpath(__file__)) - sub_folder_dir = os.path.join(current_file_dir, 
"files/brcm/") - if sub_folder_dir not in sys.path: - sys.path.append(sub_folder_dir) - import qos_param_generator - qpm = qos_param_generator.QosParamBroadcom(qosConfigs['qos_params'][dutAsic][dutTopo], - dutAsic, - portSpeedCableLength, - dutConfig, - ingressLosslessProfile, - ingressLossyProfile, - egressLosslessProfile, - egressLossyProfile, - sharedHeadroomPoolSize, - dutConfig["dualTor"], - dutTopo, - bufferConfig, - duthost, - tbinfo["topo"]["name"]) - qosParams = qpm.run() - + if 'platform_asic' in duthost.facts and duthost.facts['platform_asic'] == 'broadcom-dnx': + logger.info("THDI_BUFFER_CELL_LIMIT_SP is not valid for broadcom DNX - ignore dynamic buffer config") + qosParams = qosConfigs['qos_params'][dutAsic][dutTopo] + else: + bufferConfig = self.dutBufferConfig(duthost) + pytest_assert(len(bufferConfig) == 4, "buffer config is incompleted") + pytest_assert('BUFFER_POOL' in bufferConfig, 'BUFFER_POOL is not exist in bufferConfig') + pytest_assert('BUFFER_PROFILE' in bufferConfig, 'BUFFER_PROFILE is not exist in bufferConfig') + pytest_assert('BUFFER_QUEUE' in bufferConfig, 'BUFFER_QUEUE is not exist in bufferConfig') + pytest_assert('BUFFER_PG' in bufferConfig, 'BUFFER_PG is not exist in bufferConfig') + + current_file_dir = os.path.dirname(os.path.realpath(__file__)) + sub_folder_dir = os.path.join(current_file_dir, "files/brcm/") + if sub_folder_dir not in sys.path: + sys.path.append(sub_folder_dir) + import qos_param_generator + qpm = qos_param_generator.QosParamBroadcom(qosConfigs['qos_params'][dutAsic][dutTopo], + dutAsic, + portSpeedCableLength, + dutConfig, + ingressLosslessProfile, + ingressLossyProfile, + egressLosslessProfile, + egressLossyProfile, + sharedHeadroomPoolSize, + dutConfig["dualTor"], + dutTopo, + bufferConfig, + duthost, + tbinfo["topo"]["name"]) + qosParams = qpm.run() else: qosParams = qosConfigs['qos_params'][dutAsic][dutTopo] yield { diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 
3d9dac9e9ba..0d0c7465fb2 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -137,6 +137,11 @@ def testQosSaiPfcXoffLimit( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -214,6 +219,12 @@ def testPfcStormWithSharedHeadroomOccupancy( "pkts_num_trig_pfc": qosConfig[xonProfile]["pkts_num_trig_pfc"], "pkts_num_private_headrooom": dutQosConfig["param"]["pkts_num_private_headrooom"] }) + + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "packet_size" in qosConfig[xonProfile].keys(): testParams["packet_size"] = qosConfig[xonProfile]["packet_size"] if 'cell_size' in qosConfig[xonProfile].keys(): @@ -384,6 +395,11 @@ def testQosSaiPfcXonLimit( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -447,6 +463,11 @@ def testQosSaiLosslessVoq( "pkts_num_trig_pfc": qosConfig[LosslessVoqProfile]["pkts_num_trig_pfc"] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_margin" in qosConfig[LosslessVoqProfile].keys(): testParams["pkts_num_margin"] = qosConfig[LosslessVoqProfile]["pkts_num_margin"] @@ -584,6 +605,11 @@ def testQosSaiHeadroomPoolSize( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + 
testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + pkts_num_trig_pfc_shp = qosConfig["hdrm_pool_size"].get("pkts_num_trig_pfc_shp") if pkts_num_trig_pfc_shp: testParams["pkts_num_trig_pfc_shp"] = pkts_num_trig_pfc_shp @@ -652,6 +678,11 @@ def testQosSaiSharedReservationSize( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "packet_size" in qosConfig[sharedResSizeKey]: testParams["packet_size"] = qosConfig[sharedResSizeKey]["packet_size"] @@ -734,6 +765,11 @@ def testQosSaiHeadroomPoolWatermark( if dynamic_threshold: testParams["dynamic_threshold"] = dynamic_threshold + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -767,7 +803,9 @@ def testQosSaiBufferPoolWatermark( RunAnsibleModuleFail if ptf test fails """ disableTest = request.config.getoption("--disable_test") - if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000': + if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000' or \ + ('platform_asic' in dutTestParams["basicParams"] and + dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): disableTest = False if disableTest: pytest.skip("Buffer Pool watermark test is disabled") @@ -804,6 +842,11 @@ def testQosSaiBufferPoolWatermark( "buf_pool_roid": buf_pool_roid }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "packet_size" in qosConfig[bufPool].keys(): testParams["packet_size"] = qosConfig[bufPool]["packet_size"] @@ -860,6 
+903,11 @@ def testQosSaiLossyQueue( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -922,6 +970,11 @@ def testQosSaiLossyQueueVoq( "pkts_num_trig_egr_drp": qosConfig[LossyVoq]["pkts_num_trig_egr_drp"] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "packet_size" in qosConfig[LossyVoq].keys(): testParams["packet_size"] = qosConfig[LossyVoq]["packet_size"] testParams["cell_size"] = qosConfig[LossyVoq]["cell_size"] @@ -973,6 +1026,11 @@ def testQosSaiDscpQueueMapping( "dual_tor_scenario": dutConfig['dualTorScenario'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.DscpMappingPB", testParams=testParams @@ -1028,6 +1086,11 @@ def testQosSaiSeparatedDscpQueueMapping(self, duthost, ptfhost, dutTestParams, d }) testParams.update({"leaf_downstream": False}) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.DscpMappingPB", testParams=testParams @@ -1060,6 +1123,12 @@ def testQosSaiDot1pQueueMapping( "src_port_ip": dutConfig["testPorts"]["src_port_ip"], "vlan_id": dutConfig["testPorts"]["src_port_vlan"] }) + + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, 
testCase="sai_qos_tests.Dot1pToQueueMapping", testParams=testParams @@ -1092,6 +1161,12 @@ def testQosSaiDot1pPgMapping( "src_port_ip": dutConfig["testPorts"]["src_port_ip"], "vlan_id": dutConfig["testPorts"]["src_port_vlan"] }) + + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.Dot1pToPgMapping", testParams=testParams @@ -1143,6 +1218,11 @@ def testQosSaiDwrr( "qos_remap_enable": qos_remap_enable }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "lossy_queue_1" in dutQosConfig["param"][portSpeedCableLength].keys(): testParams["ecn"] = qosConfig[portSpeedCableLength]["lossy_queue_1"]["ecn"] else: @@ -1208,6 +1288,11 @@ def testQosSaiPgSharedWatermark( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -1217,6 +1302,10 @@ def testQosSaiPgSharedWatermark( if "pkts_num_margin" in qosConfig[pgProfile].keys(): testParams["pkts_num_margin"] = qosConfig[pgProfile]["pkts_num_margin"] + # For J2C+ we need the internal header size in calculating the shared watermarks + if "internal_hdr_size" in qosConfig.keys(): + testParams["internal_hdr_size"] = qosConfig["internal_hdr_size"] + self.runPtfTest( ptfhost, testCase="sai_qos_tests.PGSharedWatermarkTest", testParams=testParams @@ -1266,6 +1355,11 @@ def testQosSaiPgHeadroomWatermark( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + 
else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -1323,6 +1417,11 @@ def testQosSaiPGDrop( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.PGDropTest", testParams=testParams ) @@ -1383,6 +1482,11 @@ def testQosSaiQSharedWatermark( "hwsku":dutTestParams['hwsku'] }) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + if "pkts_num_egr_mem" in qosConfig.keys(): testParams["pkts_num_egr_mem"] = qosConfig["pkts_num_egr_mem"] @@ -1417,7 +1521,9 @@ def testQosSaiDscpToPgMapping( RunAnsibleModuleFail if ptf test fails """ disableTest = request.config.getoption("--disable_test") - if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000': + if dutTestParams["basicParams"]["sonic_asic_type"] == 'cisco-8000' or \ + ('platform_asic' in dutTestParams["basicParams"] and + dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): disableTest = False if disableTest: pytest.skip("DSCP to PG mapping test disabled") @@ -1433,6 +1539,12 @@ def testQosSaiDscpToPgMapping( "src_port_id": dutConfig["testPorts"]["src_port_id"], "src_port_ip": dutConfig["testPorts"]["src_port_ip"], }) + + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.DscpToPgMapping", testParams=testParams @@ -1485,6 +1597,11 @@ def testQosSaiSeparatedDscpToPgMapping(self, duthost, request, ptfhost, dutTestP testParams['dscp_to_pg_map'] = load_dscp_to_pg_map(duthost, src_port_name, 
dut_qos_maps) + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.DscpToPgMapping", testParams=testParams @@ -1538,6 +1655,12 @@ def testQosSaiDwrrWeightChange( "topo": dutTestParams["topo"], "qos_remap_enable": qos_remap_enable }) + + if "platform_asic" in dutTestParams["basicParams"]: + testParams["platform_asic"] = dutTestParams["basicParams"]["platform_asic"] + else: + testParams["platform_asic"] = None + self.runPtfTest( ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams ) diff --git a/tests/saitests/py3/sai_base_test.py b/tests/saitests/py3/sai_base_test.py index 3f1ac1fecb4..91924f67d5a 100644 --- a/tests/saitests/py3/sai_base_test.py +++ b/tests/saitests/py3/sai_base_test.py @@ -23,9 +23,16 @@ from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol +import socket +import sys +import paramiko +from paramiko.ssh_exception import BadHostKeyException, AuthenticationException, SSHException interface_to_front_mapping = {} +from switch import (sai_thrift_port_tx_enable, + sai_thrift_port_tx_disable) + class ThriftInterface(BaseTest): def setUp(self): @@ -35,10 +42,12 @@ def setUp(self): self.test_params = testutils.test_params_get() if "server" in self.test_params: - server = self.test_params['server'] + self.server = self.test_params['server'] else: - server = 'localhost' + self.server = 'localhost' + self.platform_asic = self.test_params['platform_asic'] + self.asic_id = self.test_params['asic_id'] if "port_map" in self.test_params: user_input = self.test_params['port_map'] splitted_map = user_input.split(",") @@ -57,7 +66,7 @@ def setUp(self): exit("No ptf interface<-> switch front port mapping, please specify as parameter or in external file") # Set up thrift client and contact server - self.transport = 
TSocket.TSocket(server, 9092) + self.transport = TSocket.TSocket(self.server, 9092) self.transport = TTransport.TBufferedTransport(self.transport) self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport) @@ -70,6 +79,64 @@ def tearDown(self): BaseTest.tearDown(self) self.transport.close() + def exec_cmd_on_dut(self, hostname, username, password, cmd): + client = paramiko.SSHClient() + client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + + if isinstance(cmd, list): + cmd = ' '.join(cmd) + + stdOut = stdErr = [] + retValue = 1 + try: + client.connect(hostname, username=username, password=password, allow_agent=False) + si, so, se = client.exec_command(cmd, timeout=20) + stdOut = so.readlines() + stdErr = se.readlines() + retValue = 0 + except AuthenticationException as authenticationException: + print('SSH Authentication failure with message: %s' % authenticationException, file=sys.stderr) + except SSHException as sshException: + print('SSH Command failed with message: %s' % sshException, file=sys.stderr) + except BadHostKeyException as badHostKeyException: + print('SSH Authentication failure with message: %s' % badHostKeyException, file=sys.stderr) + except socket.timeout as e: + # The ssh session will timeout in case of a successful reboot + print('Caught exception socket.timeout: {}, {}, {}'.format(repr(e), str(e), type(e)), file=sys.stderr) + retValue = 255 + except Exception as e: + print('Exception caught: {}, {}, type: {}'.format(repr(e), str(e), type(e)), file=sys.stderr) + print(sys.exc_info(), file=sys.stderr) + finally: + client.close() + + return stdOut, stdErr, retValue + + def sai_thrift_port_tx_enable(self, client, asic_type, port_list, last_port=True): + if self.platform_asic and self.platform_asic == "broadcom-dnx" and last_port: + # need to enable watchdog on the source asic using cint script + cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog enable\"".format(self.asic_id) + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.server, + 
self.test_params['dut_username'], + self.test_params['dut_password'], + cmd) + assert ('Success rv = 0' in stdOut[1]), "enable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.asic_id, + self.server) + + sai_thrift_port_tx_enable(client, asic_type, port_list) + def sai_thrift_port_tx_disable(self, client, asic_type, port_list): + if self.platform_asic and self.platform_asic == "broadcom-dnx": + # need to enable watchdog on the source asic using cint script + cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog disable\"".format(self.asic_id) + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.server, + self.test_params['dut_username'], + self.test_params['dut_password'], + cmd) + assert ('Success rv = 0' in stdOut[1]), "disable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.asic_id, + self.server) + sai_thrift_port_tx_disable(client, asic_type, port_list) + + class ThriftInterfaceDataPlane(ThriftInterface): """ Root class that sets up the thrift interface and dataplane diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 1c2d7aa508b..aa501655832 100644 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -32,9 +32,7 @@ sai_thrift_read_pg_shared_watermark, sai_thrift_read_buffer_pool_watermark, sai_thrift_read_headroom_pool_watermark, - sai_thrift_read_queue_occupancy, - sai_thrift_port_tx_disable, - sai_thrift_port_tx_enable) + sai_thrift_read_queue_occupancy) from switch_sai_thrift.ttypes import (sai_thrift_attribute_value_t, sai_thrift_attribute_t) from switch_sai_thrift.sai_headers import (SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, @@ -104,6 +102,7 @@ def check_leackout_compensation_support(asic, hwsku): def dynamically_compensate_leakout(thrift_client, counter_checker, check_port, check_field, base, ptf_test, compensate_port, compensate_pkt, max_retry): prev = base + time.sleep(1.5) curr, _ = counter_checker(thrift_client, check_port) leakout_num = curr[check_field] -
prev[check_field] retry = 0 @@ -297,7 +296,7 @@ def runTest(self): asic_type = self.test_params['sonic_asic_type'] - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, asic_type, list(port_list.keys())) # DSCP to queue mapping @@ -666,9 +665,17 @@ def runTest(self): file=sys.stderr) for i in range(0, PG_NUM): if i == pg: - assert(pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) + if i == 0 or i == 4: + assert (pg_cntrs[pg] >= pg_cntrs_base[pg] + len(dscps)) + else: + assert (pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) else: - assert(pg_cntrs[i] == pg_cntrs_base[i]) + # LACP packets are mapped to queue0 and tcp syn packets for BGP to queue4 + # So for those queues the count could be more + if i == 0 or i == 4: + assert(pg_cntrs[i] >= pg_cntrs_base[i]) + else: + assert (pg_cntrs[i] == pg_cntrs_base[i]) # confirm that dscp pkts are received total_recv_cnt = 0 @@ -760,7 +767,7 @@ def runTest(self): try: # Disable tx on EGRESS port so that headroom buffer cannot be free - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) # There are packet leak even port tx is disabled (18 packets leak on TD3 found) # Hence we send some packet to fill the leak before testing @@ -799,7 +806,7 @@ def runTest(self): assert(pg_shared_wm_res[pg] - pg_shared_wm_res_base[pg] >= (PKT_NUM - ERROR_TOLERANCE[pg]) * cell_size) finally: # Enable tx on dest port - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) # DOT1P to pg mapping @@ -950,6 +957,7 @@ def runTest(self): pkts_num_trig_ingr_drp = int( self.test_params['pkts_num_trig_ingr_drp']) hwsku = self.test_params['hwsku'] + platform_asic = self.test_params['platform_asic'] pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac # get counter names to query @@ -1018,7 +1026,7 @@ def runTest(self): if 'pkts_num_egr_mem' in 
list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and @@ -1127,12 +1135,16 @@ def runTest(self): sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\txmit_counters {}\n\txmit_counters_base {}\n'.format(test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc assert(recv_counters[pg] > recv_counters_base[pg]), 'unexpectedly PFC counter not increase, {}'.format(test_stage) - # recv port ingress drop - for cntr in ingress_counters: - assert(recv_counters[cntr] > recv_counters_base[cntr]), 'unexpectedly RX drop counter not increase, {}'.format(test_stage) - # xmit port no egress drop - for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), 'unexpectedly TX drop counter increase, {}'.format(test_stage) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info ("On J2C+ don't support port level drop counters - so ignoring this step for now") + else: + # recv port ingress drop + for cntr in ingress_counters: + assert(recv_counters[cntr] > recv_counters_base[cntr]), 'unexpectedly RX drop counter not increase, {}'.format(test_stage) + # xmit port no egress drop + for cntr in egress_counters: + assert(xmit_counters[cntr] == xmit_counters_base[cntr]), 'unexpectedly TX drop counter increase, {}'.format(test_stage) if '201811' not in sonic_version and 'mellanox' in asic_type: pg_dropped_cntrs = sai_thrift_read_pg_drop_counters( @@ -1155,7 +1167,7 @@ def runTest(self): assert pg_dropped_cntrs[i] == pg_dropped_cntrs_old[i] finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) class 
LosslessVoq(sai_base_test.ThriftInterfaceDataPlane): @@ -1261,7 +1273,7 @@ def runTest(self): else: margin = 2 - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: assert(fill_leakout_plus_one(self, src_port_1_id, dst_port_id, pkt, int(self.test_params['pg']), asic_type)) @@ -1325,7 +1337,7 @@ def runTest(self): assert(xmit_counters[cntr] == xmit_counters_base[cntr]) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) # Base class used for individual PTF runs used in the following: testPfcStormWithSharedHeadroomOccupancy @@ -1404,7 +1416,7 @@ def runTest(self): ) logging.info("Disabling xmit ports: {}".format(self.dst_port_id)) - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, self.asic_type, [self.dst_port_id]) xmit_counters_base, queue_counters = sai_thrift_read_port_counters( @@ -1462,7 +1474,7 @@ def runTest(self): ) logging.info("Enable xmit ports: {}".format(self.dst_port_id)) - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, self.asic_type, [self.dst_port_id]) # allow enough time for the dut to sync up the counter values in counters_db @@ -1506,7 +1518,7 @@ def runTest(self): time.sleep(1) switch_init(self.client) self.parse_test_params() - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, self.asic_type, [self.dst_port_id]) @@ -1660,7 +1672,7 @@ def runTest(self): step_id = 1 step_desc = 'disable TX for dst_port_id, dst_port_2_id, dst_port_3_id' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - sai_thrift_port_tx_disable(self.client, asic_type, [ + self.sai_thrift_port_tx_disable(self.client, asic_type, [ dst_port_id, dst_port_2_id, dst_port_3_id]) try: @@ -1816,7 +1828,7 @@ def runTest(self): step_id += 1 step_desc = 'enable TX for dst_port_2_id, to drain off buffer in dst_port_2' 
sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_2_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_2_id]) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) @@ -1857,7 +1869,7 @@ def runTest(self): step_id += 1 step_desc = 'enable TX for dst_port_3_id, to drain off buffer in dst_port_3' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_3_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_3_id]) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) @@ -1914,7 +1926,7 @@ def runTest(self): assert(xmit_3_counters[cntr] == xmit_3_counters_base[cntr]), 'unexpectedly egress drop on xmit port 3 (counter: {}), at step {} {}'.format(port_counter_fields[cntr], step_id, step_desc) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [ + self.sai_thrift_port_tx_enable(self.client, asic_type, [ dst_port_id, dst_port_2_id, dst_port_3_id]) @@ -2044,6 +2056,8 @@ def show_port_counter(self, rx_base, tx_base, banner): sys.stderr.write('{}\n{}\n'.format(banner, port_cnt_tbl)) def runTest(self): + platform_asic = self.test_params['platform_asic'] + margin = self.test_params.get('margin') if not margin: margin = 0 @@ -2063,7 +2077,7 @@ def runTest(self): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) # Pause egress of dut xmit port - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, self.asic_type, [self.dst_port_id]) try: @@ -2234,15 +2248,26 @@ def runTest(self): self.show_port_counter(recv_counters_bases, xmit_counters_base, 'To fill last PG and trigger ingress drop, send {} pkt with DSCP {} PG {} from src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - recv_counters, _ = 
sai_thrift_read_port_counters(self.client, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) - # assert ingress drop - for cntr in self.ingress_counters: - assert(recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) + recv_counters, queue_counters = sai_thrift_read_port_counters( + self.client, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") + else: + # assert ingress drop + for cntr in self.ingress_counters: + assert( + recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) # assert no egress drop at the dut xmit port - xmit_counters, _ = sai_thrift_read_port_counters(self.client, port_list[self.dst_port_id]) - for cntr in self.egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]) + xmit_counters, queue_counters = sai_thrift_read_port_counters( + self.client, port_list[self.dst_port_id]) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") + else: + for cntr in self.egress_counters: + assert(xmit_counters[cntr] == xmit_counters_base[cntr]) print("pg hdrm filled", file=sys.stderr) if self.wm_multiplier: @@ -2260,7 +2285,7 @@ def runTest(self): sys.stderr.flush() finally: - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, self.asic_type, [self.dst_port_id]) @@ -2350,7 +2375,7 @@ def runTest(self): # Disable all dst ports uniq_dst_ports = list(set(self.dst_port_ids)) - sai_thrift_port_tx_disable(self.client, self.asic_type, uniq_dst_ports) + self.sai_thrift_port_tx_disable(self.client, self.asic_type, uniq_dst_ports) try: for i in range(len(self.src_port_ids)): @@ -2426,7 +2451,7 @@ def runTest(self): assert drops == 0, "Detected %d egress drops" % drops finally: - sai_thrift_port_tx_enable( + 
self.sai_thrift_port_tx_enable( self.client, self.asic_type, uniq_dst_ports) # TODO: remove sai_thrift_clear_all_counters and change to use incremental counter values @@ -2613,6 +2638,8 @@ def runTest(self): limit = int(self.test_params['limit']) pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) topo = self.test_params['topo'] + hwsku = self.test_params['hwsku'] + platform_asic = self.test_params['platform_asic'] if 'backend' not in topo: if not qos_remap_enable: @@ -2669,7 +2696,7 @@ def runTest(self): ) print("actual dst_port_id: {}".format(dst_port_id), file=sys.stderr) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) send_packet(self, src_port_id, pkt, pkts_num_leak_out) @@ -2696,7 +2723,7 @@ def runTest(self): p.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 41943040) # Release port - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) cnt = 0 pkts = [] @@ -2748,8 +2775,10 @@ def runTest(self): print(diff_list, file=sys.stderr) for dscp, diff in diff_list: - assert diff < limit, "Difference for %d is %d which exceeds limit %d" % ( - dscp, diff, limit) + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ can't control how packets are dequeued (CS00012272267) - so ignoring diff check now") + else: + assert diff < limit, "Difference for %d is %d which exceeds limit %d" % (dscp, diff, limit) # Read counters print("DST port counters: ") @@ -2787,6 +2816,7 @@ def runTest(self): src_port_mac = self.dataplane.get_mac(0, src_port_id) asic_type = self.test_params['sonic_asic_type'] hwsku = self.test_params['hwsku'] + platform_asic = self.test_params['platform_asic'] # get counter names to query ingress_counters, egress_counters = get_counter_names(sonic_version) @@ -2846,7 +2876,7 @@ def runTest(self): if 'pkts_num_egr_mem' in 
list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and @@ -2907,13 +2937,20 @@ def runTest(self): assert(recv_counters[pg] == recv_counters_base[pg]) # recv port no ingress drop for cntr in ingress_counters: - assert(recv_counters[cntr] == recv_counters_base[cntr]) + if platform_asic and platform_asic == "broadcom-dnx" and cntr == 1: + assert (recv_counters[cntr] > recv_counters_base[cntr]) + else: + assert (recv_counters[cntr] == recv_counters_base[cntr]) + # xmit port egress drop - for cntr in egress_counters: - assert(xmit_counters[cntr] > xmit_counters_base[cntr]) + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support egress drop stats - so ignoring this step for now") + else: + for cntr in egress_counters: + assert(xmit_counters[cntr] > xmit_counters_base[cntr]) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) class LossyQueueVoqTest(sai_base_test.ThriftInterfaceDataPlane): @@ -2995,7 +3032,7 @@ def runTest(self): else: margin = 2 - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) try: if asic_type == 'cisco-8000': @@ -3039,7 +3076,7 @@ def runTest(self): assert(xmit_counters[cntr] > xmit_counters_base[cntr]) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) # pg shared pool applied to both lossy and lossless traffic @@ -3116,6 +3153,8 @@ def runTest(self): pkts_num_fill_shared = int(self.test_params['pkts_num_fill_shared']) cell_size = int(self.test_params['cell_size']) hwsku 
= self.test_params['hwsku'] + internal_hdr_size = self.test_params.get('internal_hdr_size', 0) + platform_asic = self.test_params['platform_asic'] if 'packet_size' in list(self.test_params.keys()): packet_length = int(self.test_params['packet_size']) @@ -3163,7 +3202,7 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[src_port_id]) @@ -3216,7 +3255,11 @@ def runTest(self): None, None, None) if pkts_num_fill_min: - assert(pg_shared_wm_res[pg] == 0) + if platform_asic and platform_asic == "broadcom-dnx": + assert(pg_shared_wm_res[pg] <= + ((pkts_num_leak_out + pkts_num_fill_min) * (packet_length + internal_hdr_size))) + else: + assert(pg_shared_wm_res[pg] == 0) else: # on t1-lag, we found vm will keep sending control # packets, this will cause the watermark to be 2 * 208 bytes @@ -3263,8 +3306,7 @@ def runTest(self): self.client, port_list[src_port_id]) print("Received packets: %d" % (pg_cntrs[pg] - pg_cntrs_base[pg]), file=sys.stderr) - print("lower bound: %d, actual value: %d, upper bound (+%d): %d" % (expected_wm * cell_size, - pg_shared_wm_res[pg], margin, (expected_wm + margin) * cell_size), file=sys.stderr) + self.show_stats('To fill PG share pool, send {} pkt'.format(pkts_num), asic_type, pg, src_port_id, dst_port_id, ingress_counters, egress_counters, @@ -3273,7 +3315,17 @@ def runTest(self): None, pg_cntrs, pg_shared_wm_res, None, None, None) - assert(expected_wm * cell_size <= pg_shared_wm_res[pg] <= (expected_wm + margin) * cell_size) + if platform_asic and platform_asic == 
"broadcom-dnx": + print("lower bound: %d, actual value: %d, upper bound (+%d): %d" % (expected_wm * cell_size, + pg_shared_wm_res[pg], margin, (expected_wm + margin) * (packet_length + internal_hdr_size)),file=sys.stderr) + assert (pg_shared_wm_res[pg] <= + ((pkts_num_leak_out + pkts_num_fill_min + expected_wm + margin) * (packet_length + internal_hdr_size))) + else: + print("lower bound: %d, actual value: %d, upper bound (+%d): %d" % (expected_wm * cell_size, + pg_shared_wm_res[pg], margin, (expected_wm + margin) * cell_size),file=sys.stderr) + assert(pg_shared_wm_res[pg] <= ( + expected_wm + margin) * cell_size) + assert(expected_wm * cell_size <= pg_shared_wm_res[pg]) pkts_num = pkts_inc @@ -3286,8 +3338,7 @@ def runTest(self): self.client, port_list[src_port_id]) print("Received packets: %d" % (pg_cntrs[pg] - pg_cntrs_base[pg]), file=sys.stderr) - print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( - pkts_num, ((expected_wm + cell_occupancy) * cell_size), pg_shared_wm_res[pg]), file=sys.stderr) + self.show_stats('To overflow PG share pool, send {} pkt'.format(pkts_num), asic_type, pg, src_port_id, dst_port_id, ingress_counters, egress_counters, @@ -3297,10 +3348,19 @@ def runTest(self): None, None, None) assert(fragment < cell_occupancy) - assert(expected_wm * cell_size <= pg_shared_wm_res[pg] <= (expected_wm + margin + cell_occupancy) * cell_size) + + if platform_asic and platform_asic == "broadcom-dnx": + print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( + pkts_num, ((expected_wm + cell_occupancy) * (packet_length + internal_hdr_size)), pg_shared_wm_res[pg]), file=sys.stderr) + assert (expected_wm * (packet_length + internal_hdr_size) <= (expected_wm + margin + cell_occupancy) * (packet_length + internal_hdr_size)) + + else: + print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( + pkts_num, ((expected_wm + cell_occupancy) * cell_size), pg_shared_wm_res[pg]), 
file=sys.stderr) + assert (expected_wm * cell_size <= pg_shared_wm_res[pg] <= (expected_wm + margin + cell_occupancy) * cell_size) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) # pg headroom is a notion for lossless traffic only @@ -3331,6 +3391,7 @@ def runTest(self): self.test_params['pkts_num_trig_ingr_drp']) cell_size = int(self.test_params['cell_size']) hwsku = self.test_params['hwsku'] + platform_asic = self.test_params['platform_asic'] # Prepare TCP packet data ttl = 64 @@ -3376,7 +3437,7 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) @@ -3402,7 +3463,11 @@ def runTest(self): q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks( self.client, port_list[src_port_id]) - assert(pg_headroom_wm_res[pg] == 0) + if platform_asic and platform_asic == "broadcom-dnx": + logging.info ("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(pg_headroom_wm_res[pg] == 0) send_packet(self, src_port_id, pkt, margin) @@ -3430,10 +3495,15 @@ def runTest(self): print("lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) * cell_size * cell_occupancy, pg_headroom_wm_res[pg], ((expected_wm + margin) * cell_size * cell_occupancy)), file=sys.stderr) - assert(pg_headroom_wm_res[pg] <= ( - expected_wm + margin) * cell_size * cell_occupancy) - assert((expected_wm - margin) * cell_size * - cell_occupancy <= pg_headroom_wm_res[pg]) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support 
SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(pg_headroom_wm_res[pg] <= ( + expected_wm + margin) * cell_size * cell_occupancy) + assert((expected_wm - margin) * cell_size * + cell_occupancy <= pg_headroom_wm_res[pg]) pkts_num = pkts_inc @@ -3447,13 +3517,18 @@ def runTest(self): ((expected_wm - margin) * cell_size * cell_occupancy, pg_headroom_wm_res[pg], ((expected_wm + margin) * cell_size * cell_occupancy)), file=sys.stderr) assert(expected_wm == total_hdrm) - assert(pg_headroom_wm_res[pg] <= ( - expected_wm + margin) * cell_size * cell_occupancy) - assert((expected_wm - margin) * cell_size * - cell_occupancy <= pg_headroom_wm_res[pg]) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(pg_headroom_wm_res[pg] <= ( + expected_wm + margin) * cell_size * cell_occupancy) + assert((expected_wm - margin) * cell_size * + cell_occupancy <= pg_headroom_wm_res[pg]) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) class PGDropTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): @@ -3506,7 +3581,7 @@ def runTest(self): pass_iterations=0 assert iterations > 0, "Need at least 1 iteration" for test_i in range(iterations): - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, asic_type, [dst_port_id]) pg_dropped_cntrs_base=sai_thrift_read_pg_drop_counters( @@ -3545,7 +3620,7 @@ def runTest(self): print("expected trig drop: {}, actual trig drop: {}, diff: {}".format( pkts_num_trig_ingr_drp, actual_num_trig_ingr_drp, ingr_drop_diff), file=sys.stderr) - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, asic_type, [dst_port_id]) print("pass iterations: {}, total iterations: {}, margin: 
{}".format( @@ -3554,7 +3629,7 @@ def runTest(self): 0.75 * iterations), "Passed iterations {} insufficient to meet minimum required iterations {}".format(pass_iterations, int(0.75 * iterations)) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) class QSharedWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): @@ -3640,6 +3715,7 @@ def runTest(self): pkts_num_trig_drp=int(self.test_params['pkts_num_trig_drp']) cell_size=int(self.test_params['cell_size']) hwsku=self.test_params['hwsku'] + platform_asic = self.test_params['platform_asic'] if 'packet_size' in list(self.test_params.keys()): packet_length=int(self.test_params['packet_size']) @@ -3686,7 +3762,7 @@ def runTest(self): recv_counters_base, _ = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) q_wm_res_base, pg_shared_wm_res_base, pg_headroom_wm_res_base = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) @@ -3764,7 +3840,7 @@ def runTest(self): fragment=total_shared - expected_wm if 'cisco-8000' in asic_type: - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, asic_type, [dst_port_id]) assert(fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) @@ -3777,7 +3853,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) if 'cisco-8000' in asic_type: - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, asic_type, [dst_port_id]) time.sleep(8) @@ -3803,12 +3879,17 @@ def runTest(self): None, pg_cntrs, 
None, None, None, None, None, pg_shared_wm_res, pg_headroom_wm_res, q_wm_res) - assert((expected_wm - margin) * cell_size <= q_wm_res[queue] <= (expected_wm + margin) * cell_size) + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(q_wm_res[queue] <= (expected_wm + margin) * cell_size) + assert((expected_wm - margin) * cell_size <= q_wm_res[queue]) pkts_num=pkts_inc if 'cisco-8000' in asic_type: - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, asic_type, [dst_port_id]) assert(fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) @@ -3819,7 +3900,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) if 'cisco-8000' in asic_type: - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, asic_type, [dst_port_id]) time.sleep(8) @@ -3840,10 +3921,16 @@ def runTest(self): None, None, pg_shared_wm_res, pg_headroom_wm_res, q_wm_res) assert(fragment < cell_occupancy) - assert(expected_wm * cell_size <= q_wm_res[queue] <= (expected_wm + margin) * cell_size) + + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(expected_wm * cell_size <= q_wm_res[queue]) + assert(q_wm_res[queue] <= (expected_wm + margin) * cell_size) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) # TODO: buffer pool roid should be obtained via rpc calls # based on the pg or queue index @@ -3936,10 +4023,10 @@ def runTest(self): # the impact caused by lossy traffic # # TH2 uses scheduler-based TX enable, this does not require sending packets to leak out - sai_thrift_port_tx_disable(self.client, 
asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) pkts_num_to_send += (pkts_num_leak_out + pkts_num_fill_min) send_packet(self, src_port_id, pkt, pkts_num_to_send) - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( self.client, buf_pool_roid) - buffer_pool_wm_base @@ -3976,7 +4063,7 @@ def runTest(self): print("pkts num to send: %d, total pkts: %d, shared: %d" % (pkts_num, expected_wm, total_shared), file=sys.stderr) - sai_thrift_port_tx_disable( + self.sai_thrift_port_tx_disable( self.client, asic_type, [dst_port_id]) pkts_num_to_send += pkts_num if 'cisco-8000' in asic_type: @@ -3985,7 +4072,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num_to_send - 1) else: send_packet(self, src_port_id, pkt, pkts_num_to_send) - sai_thrift_port_tx_enable( + self.sai_thrift_port_tx_enable( self.client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( @@ -4000,7 +4087,7 @@ def runTest(self): pkts_num=pkts_inc # overflow the shared pool - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) pkts_num_to_send += pkts_num if 'cisco-8000' in asic_type: assert(fill_leakout_plus_one(self, src_port_id, @@ -4009,7 +4096,7 @@ def runTest(self): else: send_packet(self, src_port_id, pkt, pkts_num_to_send) - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( self.client, buf_pool_roid) - buffer_pool_wm_base @@ -4021,7 +4108,7 @@ def runTest(self): assert(buffer_pool_wm <= (expected_wm + extra_cap_margin) * cell_size) finally: - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + 
self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) class PacketTransmit(sai_base_test.ThriftInterfaceDataPlane): @@ -4142,7 +4229,7 @@ def runTest(self): try: # Disable tx on EGRESS port so that headroom buffer cannot be free - sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) # Make a snapshot of transmitted packets tx_counters_base, _ = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) # Make a snapshot of received packets @@ -4189,5 +4276,5 @@ def runTest(self): assert(rx_counters[pg] > rx_counters_base[pg]) finally: # Enable tx on dest port - sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id])