diff --git a/tests/common/devices/sonic.py b/tests/common/devices/sonic.py index 5886a7bcf8d..9f68809002c 100644 --- a/tests/common/devices/sonic.py +++ b/tests/common/devices/sonic.py @@ -1873,22 +1873,6 @@ def is_service_running(self, service_name, docker_name): return "RUNNING" in service_status - def remove_ssh_tunnel_sai_rpc(self): - """ - Removes any ssh tunnels if present created for syncd RPC communication - - Returns: - None - """ - try: - pid_list = self.shell( - 'pgrep -f "ssh -o StrictHostKeyChecking=no -fN -L \*:9092"' - )["stdout_lines"] - except RunAnsibleModuleFail: - return - for pid in pid_list: - self.shell("kill {}".format(pid), module_ignore_errors=True) - def get_up_ip_ports(self): """ Get a list for all up ip interfaces diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py index cae57504999..71d98e83d03 100644 --- a/tests/common/devices/sonic_asic.py +++ b/tests/common/devices/sonic_asic.py @@ -18,6 +18,7 @@ class SonicAsic(object): _MULTI_ASIC_SERVICE_NAME = "{}@{}" # service name, asic_id _MULTI_ASIC_DOCKER_NAME = "{}{}" # docker name, asic_id + _RPC_PORT_FOR_SSH_TUNNEL = 9092 def __init__(self, sonichost, asic_index): """ Initializing a ASIC on a SONiC host. 
@@ -325,6 +326,9 @@ def bgp_drop_rule(self, ip_version, state="present"): logger.debug(output) + def get_rpc_port_ssh_tunnel(self): + return self._RPC_PORT_FOR_SSH_TUNNEL + self.asic_index + def remove_ssh_tunnel_sai_rpc(self): """ Removes any ssh tunnels if present created for syncd RPC communication @@ -334,7 +338,15 @@ def remove_ssh_tunnel_sai_rpc(self): """ if not self.sonichost.is_multi_asic: return - return self.sonichost.remove_ssh_tunnel_sai_rpc() + + try: + pid_list = self.sonichost.shell( + r'pgrep -f "ssh -o StrictHostKeyChecking=no -fN -L \*:{}"'.format(self.get_rpc_port_ssh_tunnel()) + )["stdout_lines"] + except RunAnsibleModuleFail: + return + for pid in pid_list: + self.shell("kill {}".format(pid), module_ignore_errors=True) def create_ssh_tunnel_sai_rpc(self): """ @@ -363,9 +375,9 @@ def create_ssh_tunnel_sai_rpc(self): self.sonichost.shell( ("ssh -o StrictHostKeyChecking=no -fN" - " -L *:9092:{}:9092 localhost" - ).format(ns_docker_if_ipv4) - ) + " -L *:{}:{}:{} localhost").format(self.get_rpc_port_ssh_tunnel(), ns_docker_if_ipv4, + self._RPC_PORT_FOR_SSH_TUNNEL )) + def command(self, cmdstr): """ diff --git a/tests/common/fixtures/duthost_utils.py b/tests/common/fixtures/duthost_utils.py index 0c589fd8a7a..02b2a8bbe16 100644 --- a/tests/common/fixtures/duthost_utils.py +++ b/tests/common/fixtures/duthost_utils.py @@ -434,8 +434,8 @@ def utils_create_test_vlans(duthost, cfg_facts, vlan_ports_list, vlan_intfs_dict duthost.shell_cmds(cmds=cmds) -@pytest.fixture(scope='module') -def dut_qos_maps(rand_selected_front_end_dut): +@pytest.fixture(scope='class') +def dut_qos_maps(get_src_dst_asic_and_duts): """ A module level fixture to get QoS map from DUT host. 
Return a dict @@ -452,32 +452,32 @@ def dut_qos_maps(rand_selected_front_end_dut): or an empty dict if failed to parse the output """ maps = {} + dut = get_src_dst_asic_and_duts['src_dut'] try: - if rand_selected_front_end_dut.is_multi_asic: + if dut.is_multi_asic: sonic_cfggen_cmd = "sonic-cfggen -n asic0 -d --var-json" else: sonic_cfggen_cmd = "sonic-cfggen -d --var-json" # port_qos_map - port_qos_map_data = rand_selected_front_end_dut.shell("{} 'PORT_QOS_MAP'".format(sonic_cfggen_cmd))['stdout'] - maps['port_qos_map'] = json.loads(port_qos_map_data) if port_qos_map_data else None + port_qos_map = dut.shell("{} 'PORT_QOS_MAP'".format(sonic_cfggen_cmd))['stdout'] + maps['port_qos_map'] = json.loads(port_qos_map) if port_qos_map else None + # dscp_to_tc_map - dscp_to_tc_map_data = rand_selected_front_end_dut.shell( - "{} 'DSCP_TO_TC_MAP'".format(sonic_cfggen_cmd))['stdout'] - maps['dscp_to_tc_map'] = json.loads(dscp_to_tc_map_data) if dscp_to_tc_map_data else None + dscp_to_tc_map = dut.shell("{} 'DSCP_TO_TC_MAP'".format(sonic_cfggen_cmd))['stdout'] + maps['dscp_to_tc_map'] = json.loads(dscp_to_tc_map) if dscp_to_tc_map else None + # tc_to_queue_map - tc_to_queue_map_data = rand_selected_front_end_dut.shell( - "{} 'TC_TO_QUEUE_MAP'".format(sonic_cfggen_cmd))['stdout'] - maps['tc_to_queue_map'] = json.loads(tc_to_queue_map_data) if tc_to_queue_map_data else None + tc_to_queue_map = dut.shell("{} 'TC_TO_QUEUE_MAP'".format(sonic_cfggen_cmd))['stdout'] + maps['tc_to_queue_map'] = json.loads(tc_to_queue_map) if tc_to_queue_map else None + # tc_to_priority_group_map - tc_to_priority_group_map_data = rand_selected_front_end_dut.shell( - "{} 'TC_TO_PRIORITY_GROUP_MAP'".format(sonic_cfggen_cmd))['stdout'] - maps['tc_to_priority_group_map'] = json.loads( - tc_to_priority_group_map_data) if tc_to_priority_group_map_data else None + tc_to_priority_group_map = dut.shell("{} 'TC_TO_PRIORITY_GROUP_MAP'".format(sonic_cfggen_cmd))['stdout'] + maps['tc_to_priority_group_map'] = 
json.loads(tc_to_priority_group_map) if tc_to_priority_group_map else None + # tc_to_dscp_map - tc_to_dscp_map_data = rand_selected_front_end_dut.shell( - "{} 'TC_TO_DSCP_MAP'".format(sonic_cfggen_cmd))['stdout'] - maps['tc_to_dscp_map'] = json.loads(tc_to_dscp_map_data) if tc_to_dscp_map_data else None + tc_to_dscp_map = dut.shell("{} 'TC_TO_DSCP_MAP'".format(sonic_cfggen_cmd))['stdout'] + maps['tc_to_dscp_map'] = json.loads(tc_to_dscp_map) if tc_to_dscp_map else None except Exception as e: logger.error("Got exception: " + repr(e)) return maps diff --git a/tests/common/fixtures/ptfhost_utils.py b/tests/common/fixtures/ptfhost_utils.py index efb1f4dfc38..4fbd4a96538 100644 --- a/tests/common/fixtures/ptfhost_utils.py +++ b/tests/common/fixtures/ptfhost_utils.py @@ -508,24 +508,34 @@ def ptf_test_port_map_active_active(ptfhost, tbinfo, duthosts, mux_server_url, d # Loop ptf_map of each DUT. Each ptf_map maps from ptf port index to dut port index disabled_ptf_ports = disabled_ptf_ports.union(set(ptf_map.keys())) - router_macs = [duthost.facts['router_mac'] for duthost in duthosts] + router_macs = [] + all_dut_names = [duthost.hostname for duthost in duthosts] + for a_dut_name in tbinfo['duts']: + if a_dut_name in all_dut_names: + duthost = duthosts[a_dut_name] + router_macs.append(duthost.facts['router_mac']) + else: + router_macs.append(None) logger.info('active_dut_map={}'.format(active_dut_map)) logger.info('disabled_ptf_ports={}'.format(disabled_ptf_ports)) logger.info('router_macs={}'.format(router_macs)) - asic_idx = 0 ports_map = {} for ptf_port, dut_intf_map in tbinfo['topo']['ptf_dut_intf_map'].items(): if str(ptf_port) in disabled_ptf_ports: # Skip PTF ports that are connected to disabled VLAN interfaces continue + asic_idx = 0 + dut_port = None if len(dut_intf_map.keys()) == 2: # PTF port is mapped to two DUTs -> dualtor topology and the PTF port is a vlan port # Packet sent from this ptf port will only be accepted by the active side DUT - # DualToR DUTs 
use same special Vlan interface MAC address + # DualToR DUTs use same special Vlan interface MAC addres target_dut_indexes = list(map(int, active_dut_map[ptf_port])) + target_dut_port = int(list(dut_intf_map.values())[0]) + target_hostname = duthosts[target_dut_indexes[0]].hostname ports_map[ptf_port] = { 'target_dut': target_dut_indexes, 'target_dest_mac': tbinfo['topo']['properties']['topology']['DUT']['vlan_configs']['one_vlan_a'] @@ -535,30 +545,39 @@ def ptf_test_port_map_active_active(ptfhost, tbinfo, duthosts, mux_server_url, d } else: # PTF port is mapped to single DUT + dut_index_for_pft_port = int(dut_intf_map.keys()[0]) + if router_macs[dut_index_for_pft_port] is None: + continue target_dut_index = int(list(dut_intf_map.keys())[0]) target_dut_port = int(list(dut_intf_map.values())[0]) router_mac = router_macs[target_dut_index] - dut_port = None - if len(duts_minigraph_facts[duthosts[target_dut_index].hostname]) > 1: - for list_idx, mg_facts_tuple in enumerate(duts_minigraph_facts[duthosts[target_dut_index].hostname]): + target_hostname = tbinfo['duts'][target_dut_index] + + if len(duts_minigraph_facts[target_hostname]) > 1: + # Dealing with multi-asic target dut + for list_idx, mg_facts_tuple in enumerate(duts_minigraph_facts[target_hostname]): idx, mg_facts = mg_facts_tuple if target_dut_port in list(mg_facts['minigraph_port_indices'].values()): - router_mac = duts_running_config_facts[duthosts[target_dut_index].hostname][list_idx][1]\ + router_mac = duts_running_config_facts[target_hostname][list_idx][1]\ ['DEVICE_METADATA']['localhost']['mac'].lower() asic_idx = idx - for a_dut_port, a_dut_port_index in mg_facts['minigraph_port_indices'].items(): - if a_dut_port_index == target_dut_port and "Ethernet-Rec" not in a_dut_port and \ - "Ethernet-IB" not in a_dut_port and "Ethernet-BP" not in a_dut_port: - dut_port = a_dut_port - break + ports_map[ptf_port] = { 'target_dut': [target_dut_index], 'target_dest_mac': router_mac, 'target_src_mac': [router_mac], 
- 'dut_port': dut_port, 'asic_idx': asic_idx } + _, asic_mg_facts = duts_minigraph_facts[target_hostname][asic_idx] + for a_dut_port, a_dut_port_index in asic_mg_facts['minigraph_port_indices'].items(): + if a_dut_port_index == target_dut_port and "Ethernet-Rec" not in a_dut_port and \ + "Ethernet-IB" not in a_dut_port and "Ethernet-BP" not in a_dut_port: + dut_port = a_dut_port + break + + ports_map[ptf_port]['dut_port'] = dut_port + logger.debug('ptf_test_port_map={}'.format(json.dumps(ports_map, indent=2))) ptfhost.copy(content=json.dumps(ports_map), dest=PTF_TEST_PORT_MAP) diff --git a/tests/conftest.py b/tests/conftest.py index 790abdf969c..c144d7991d8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,6 +28,7 @@ from tests.common.fixtures.duthost_utils import backup_and_restore_config_db_session from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import run_icmp_responder_session # lgtm[py/unused-import] +from tests.common.fixtures.ptfhost_utils import ptf_test_port_map_active_active # noqa F401 from tests.common.helpers.constants import ( ASIC_PARAM_TYPE_ALL, ASIC_PARAM_TYPE_FRONTEND, DEFAULT_ASIC_ID, ASICS_PRESENT @@ -1599,6 +1600,59 @@ def duts_running_config_facts(duthosts): return cfg_facts @pytest.fixture(scope='class') +def dut_test_params_qos(duthosts, tbinfo, ptfhost, get_src_dst_asic_and_duts, lower_tor_host, creds, + mux_server_url, mux_status_from_nic_simulator, duts_running_config_facts, duts_minigraph_facts): + if 'dualtor' in tbinfo['topo']['name']: + all_duts = [lower_tor_host] + else: + all_duts = get_src_dst_asic_and_duts['all_duts'] + + src_asic = get_src_dst_asic_and_duts['src_asic'] + dst_asic = get_src_dst_asic_and_duts['dst_asic'] + + src_dut = get_src_dst_asic_and_duts['src_dut'] + src_dut_ip = src_dut.host.options['inventory_manager'].get_host(src_dut.hostname).vars['ansible_host'] + src_server = "{}:{}".format(src_dut_ip, 
src_asic.get_rpc_port_ssh_tunnel()) + + duthost = all_duts[0] + mgFacts = duthost.get_extended_minigraph_facts(tbinfo) + topo = tbinfo["topo"]["name"] + + rtn_dict = { + "topo": topo, + "hwsku": mgFacts["minigraph_hwsku"], + "basicParams": { + "router_mac": duthost.facts["router_mac"], + "src_server" : src_server, + "port_map_file": ptf_test_port_map_active_active( + ptfhost, tbinfo, duthosts, mux_server_url, + duts_running_config_facts, duts_minigraph_facts, + mux_status_from_nic_simulator()), + "sonic_asic_type": duthost.facts['asic_type'], + "sonic_version": duthost.os_version, + "src_dut_index": get_src_dst_asic_and_duts['src_dut_index'], + "src_asic_index": get_src_dst_asic_and_duts['src_asic_index'], + "dst_dut_index": get_src_dst_asic_and_duts['dst_dut_index'], + "dst_asic_index": get_src_dst_asic_and_duts['dst_asic_index'], + "dut_username": creds['sonicadmin_user'], + "dut_password": creds['sonicadmin_password'] + }, + + } + + # Add dst server info if src and dst asic are different + if src_asic != dst_asic: + dst_dut = get_src_dst_asic_and_duts['dst_dut'] + dst_dut_ip = dst_dut.host.options['inventory_manager'].get_host(dst_dut.hostname).vars['ansible_host'] + rtn_dict["basicParams"]["dst_server"] = "{}:{}".format(dst_dut_ip, dst_asic.get_rpc_port_ssh_tunnel()) + + if 'platform_asic' in duthost.facts: + rtn_dict['basicParams']["platform_asic"] = duthost.facts['platform_asic'] + + yield rtn_dict + + +@ pytest.fixture(scope='class') def dut_test_params(duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, ptf_portmap_file, lower_tor_host, creds): # noqa F811 """ diff --git a/tests/qos/files/mellanox/qos_param_generator.py b/tests/qos/files/mellanox/qos_param_generator.py index 7980f87ba47..1c7b16e287b 100644 --- a/tests/qos/files/mellanox/qos_param_generator.py +++ b/tests/qos/files/mellanox/qos_param_generator.py @@ -1,7 +1,9 @@ import math class QosParamMellanox(object): - def __init__(self, qos_params, asic_type, speed_cable_len, dutConfig, 
ingressLosslessProfile, ingressLossyProfile, egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize, dualTor): + def __init__(self, qos_params, asic_type, speed_cable_len, dutConfig, ingressLosslessProfile, + ingressLossyProfile, egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize, + dualTor, src_dut_index, src_asic_index, dst_asic_index, dst_dut_index): self.asic_param_dic = { 'spc1': { 'cell_size': 96, @@ -44,7 +46,10 @@ def __init__(self, qos_params, asic_type, speed_cable_len, dutConfig, ingressLos self.sharedHeadroomPoolSize = None self.dutConfig = dutConfig self.dualTor = dualTor - + self.src_dut_index = src_dut_index + self.src_asic_index = src_asic_index + self.dst_dut_index = dst_dut_index + self.dst_asic_index = dst_asic_index return def run(self): @@ -88,18 +93,19 @@ def collect_qos_configurations(self): pkts_num_trig_egr_drp = egress_lossy_size + 1 if self.sharedHeadroomPoolSize: - testPortIds = self.dutConfig['testPortIds'] + src_testPortIds = self.dutConfig['testPortIds'][self.src_dut_index][self.src_asic_index] + dst_testPortIds = self.dutConfig['testPortIds'][self.dst_dut_index][self.dst_asic_index] ingress_ports_num_shp = 8 pkts_num_trig_pfc_shp = [] ingress_ports_list_shp = [] occupancy_per_port = ingress_lossless_size - self.qos_parameters['dst_port_id'] = testPortIds[0] + self.qos_parameters['dst_port_id'] = dst_testPortIds[0] pgs_per_port = 2 if not self.dualTor else 4 for i in range(1, ingress_ports_num_shp): for j in range(pgs_per_port): pkts_num_trig_pfc_shp.append(occupancy_per_port + xon + hysteresis) occupancy_per_port /= 2 - ingress_ports_list_shp.append(testPortIds[i]) + ingress_ports_list_shp.append(src_testPortIds[i]) self.qos_parameters['pkts_num_trig_pfc_shp'] = pkts_num_trig_pfc_shp self.qos_parameters['src_port_ids'] = ingress_ports_list_shp self.qos_parameters['pkts_num_hdrm_full'] = xoff - 2 @@ -209,4 +215,4 @@ def calculate_parameters(self): self.qos_params_mlnx['ecn_{}'.format(i+1)]['cell_size'] = 
self.cell_size self.qos_params_mlnx['shared-headroom-pool'] = self.sharedHeadroomPoolSize - self.qos_params_mlnx['pkts_num_private_headrooom'] = self.asic_param_dic[self.asic_type]['private_headroom'] \ No newline at end of file + self.qos_params_mlnx['pkts_num_private_headrooom'] = self.asic_param_dic[self.asic_type]['private_headroom'] diff --git a/tests/qos/qos_helpers.py b/tests/qos/qos_helpers.py index 8003e36d6d9..cc1e4a838c7 100644 --- a/tests/qos/qos_helpers.py +++ b/tests/qos/qos_helpers.py @@ -35,6 +35,8 @@ def eos_to_linux_intf(eos_intf_name, hwsku=None): """ if hwsku == "MLNX-OS": linux_intf_name = eos_intf_name.replace("ernet 1/", "sl1p").replace("/", "sp") + elif hwsku and "Nokia" in hwsku: + linux_intf_name = eos_intf_name else: linux_intf_name = eos_intf_name.replace('Ethernet', 'et').replace('/', '_') return linux_intf_name diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py index aa33e8e486f..fe72bd42af7 100644 --- a/tests/qos/qos_sai_base.py +++ b/tests/qos/qos_sai_base.py @@ -5,7 +5,13 @@ import re import yaml -from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # lgtm[py/unused-import] +import random +import os +import sys +import copy + +from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # noqa F401 +import copy from tests.common.helpers.assertions import pytest_assert, pytest_require from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice from tests.common.cisco_data import is_cisco_device @@ -57,42 +63,40 @@ def isBufferInApplDb(self, dut_asic): return self.buffer_model @pytest.fixture(scope='class', autouse=True) - def dutTestParams(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, - dut_test_params, tbinfo): + def dutTestParams(self, duthosts, dut_test_params_qos, tbinfo, get_src_dst_asic_and_duts): """ Prepares DUT host test params Returns: dutTestParams (dict): DUT host test params """ # update router mac - 
dut_test_params["basicParams"]["asic_id"] = enum_frontend_asic_index - if dut_test_params["topo"] in self.SUPPORTED_T0_TOPOS: - dut_test_params["basicParams"]["router_mac"] = '' + if dut_test_params_qos["topo"] in self.SUPPORTED_T0_TOPOS: + dut_test_params_qos["basicParams"]["router_mac"] = '' elif "dualtor" in tbinfo["topo"]["name"]: # For dualtor qos test scenario, DMAC of test traffic is default vlan interface's MAC address. # To reduce duplicated code, put "is_dualtor" and "def_vlan_mac" into dutTestParams['basicParams']. - dut_test_params["basicParams"]["is_dualtor"] = True + dut_test_params_qos["basicParams"]["is_dualtor"] = True vlan_cfgs = tbinfo['topo']['properties']['topology']['DUT']['vlan_configs'] if vlan_cfgs and 'default_vlan_config' in vlan_cfgs: default_vlan_name = vlan_cfgs['default_vlan_config'] if default_vlan_name: for vlan in vlan_cfgs[default_vlan_name].values(): if 'mac' in vlan and vlan['mac']: - dut_test_params["basicParams"]["def_vlan_mac"] = vlan['mac'] + dut_test_params_qos["basicParams"]["def_vlan_mac"] = vlan['mac'] break - pytest_assert(dut_test_params["basicParams"]["def_vlan_mac"] is not None, "Dual-TOR miss default VLAN MAC address") + pytest_assert(dut_test_params_qos["basicParams"]["def_vlan_mac"] is not None, "Dual-TOR miss default VLAN MAC address") else: try: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + duthost = get_src_dst_asic_and_duts['src_dut'] asic = duthost.asic_instance().asic_index - dut_test_params['basicParams']["router_mac"] = duthost.shell( + dut_test_params_qos['basicParams']["router_mac"] = duthost.shell( 'sonic-db-cli -n asic{} CONFIG_DB hget "DEVICE_METADATA|localhost" mac'.format(asic))['stdout'] except RunAnsibleModuleFail: - dut_test_params['basicParams']["router_mac"] = duthost.shell( + dut_test_params_qos['basicParams']["router_mac"] = duthost.shell( 'sonic-db-cli CONFIG_DB hget "DEVICE_METADATA|localhost" mac')['stdout'] - yield dut_test_params + yield dut_test_params_qos def 
runPtfTest(self, ptfhost, testCase='', testParams={}): """ @@ -428,7 +432,136 @@ def __assignTestPortIps(self, mgFacts): return dutPortIps - def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_port_ids): + @pytest.fixture(scope='class') + def swapSyncd_on_selected_duts(self, request, duthosts, get_src_dst_asic_and_duts, creds, tbinfo, lower_tor_host): + """ + Swap syncd on DUT host + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + None + """ + swapSyncd = request.config.getoption("--qos_swap_syncd") + public_docker_reg = request.config.getoption("--public_docker_registry") + try: + if swapSyncd: + if public_docker_reg: + new_creds = copy.deepcopy(creds) + new_creds['docker_registry_host'] = new_creds['public_docker_registry_host'] + new_creds['docker_registry_username'] = '' + new_creds['docker_registry_password'] = '' + else: + new_creds = creds + for duthost in get_src_dst_asic_and_duts["all_duts"]: + docker.swap_syncd(duthost, new_creds) + yield + finally: + if swapSyncd: + for duthost in get_src_dst_asic_and_duts["all_duts"]: + docker.restore_default_syncd(duthost, new_creds) + + @pytest.fixture(scope='class', name="select_src_dst_dut_and_asic", + params=("single_asic", "single_dut_multi_asic", "multi_dut")) + def select_src_dst_dut_and_asic(self, duthosts, request, tbinfo, lower_tor_host): + test_port_selection_criteria = request.param + logger.info("test_port_selection_criteria is {}".format(test_port_selection_criteria)) + src_dut_index = 0 + dst_dut_index = 0 + src_asic_index = 0 + dst_asic_index = 0 + topo = tbinfo["topo"]["name"] + if 'dualtor' in tbinfo['topo']['name']: + # index of lower_tor_host + for a_dut_index in range(len(duthosts)): + if duthosts[a_dut_index] == lower_tor_host: + lower_tor_dut_index = a_dut_index + break + + duthost = duthosts.frontend_nodes[0] + if test_port_selection_criteria == 'single_asic': + # We should randomly pick a dut from 
duthosts.frontend_nodes and a random asic in that selected DUT + # for now hard code the first DUT and the first asic + if 'dualtor' in tbinfo['topo']['name']: + src_dut_index = lower_tor_dut_index + else: + src_dut_index = 0 + dst_dut_index = src_dut_index + src_asic_index = 0 + dst_asic_index = 0 + + elif test_port_selection_criteria == "single_dut_multi_asic": + if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost): + pytest.skip("single_dut_multi_asic is not supported on T0 topologies") + found_multi_asic_dut = False + for a_dut_index in range(len(duthosts.frontend_nodes)): + a_dut = duthosts.frontend_nodes[a_dut_index] + if a_dut.sonichost.is_multi_asic: + src_dut_index = a_dut_index + dst_dut_index = a_dut_index + src_asic_index = 0 + dst_asic_index = 1 + found_multi_asic_dut = True + logger.info ("Using dut {} for single_dut_multi_asic testing".format(a_dut.hostname)) + break + if not found_multi_asic_dut: + pytest.skip("Did not find any frontend node that is multi-asic - so can't run single_dut_multi_asic tests") + else: + # Dealing with multi-dut + if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost): + pytest.skip("multi-dut is not supported on T0 topologies") + elif topo in self.SUPPORTED_T1_TOPOS: + pytest.skip("multi-dut is not supported on T1 topologies") + + if (len(duthosts.frontend_nodes)) < 2: + pytest.skip("Don't have 2 frontend nodes - so can't run multi_dut tests") + + src_dut_index = 0 + dst_dut_index = 1 + src_asic_index = 0 + dst_asic_index = 0 + + yield { + "src_dut_index": src_dut_index, + "dst_dut_index": dst_dut_index, + "src_asic_index": src_asic_index, + "dst_asic_index": dst_asic_index + } + + @pytest.fixture(scope='class') + def get_src_dst_asic_and_duts(self, duthosts, tbinfo, select_src_dst_dut_and_asic, lower_tor_host): + if 'dualtor' in tbinfo['topo']['name']: + src_dut = lower_tor_host + dst_dut = lower_tor_host + else: + src_dut = duthosts.frontend_nodes[select_src_dst_dut_and_asic["src_dut_index"]] + 
dst_dut = duthosts.frontend_nodes[select_src_dst_dut_and_asic["dst_dut_index"]] + + src_asic = src_dut.asics[select_src_dst_dut_and_asic["src_asic_index"]] + dst_asic = dst_dut.asics[select_src_dst_dut_and_asic["dst_asic_index"]] + + all_asics = [src_asic] + if src_asic != dst_asic: + all_asics.append(dst_asic) + + all_duts = [src_dut] + if src_dut != dst_dut: + all_duts.append(dst_dut) + + rtn_dict = { + "src_asic": src_asic, + "dst_asic": dst_asic, + "src_dut": src_dut, + "dst_dut": dst_dut, + "all_asics": all_asics, + "all_duts": all_duts + } + rtn_dict.update(select_src_dst_dut_and_asic) + yield rtn_dict + + def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_port_ids, get_src_dst_asic_and_duts): """ Build map of test ports index and IPs @@ -443,16 +576,27 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_ dstPorts = request.config.getoption("--qos_dst_ports") srcPorts = request.config.getoption("--qos_src_ports") + src_dut_port_ids = testPortIds[get_src_dst_asic_and_duts['src_dut_index']] + src_test_port_ids = src_dut_port_ids[get_src_dst_asic_and_duts['src_asic_index']] + dst_dut_port_ids = testPortIds[get_src_dst_asic_and_duts['dst_dut_index']] + dst_test_port_ids = dst_dut_port_ids[get_src_dst_asic_and_duts['dst_asic_index']] + + src_dut_port_ips = testPortIps[get_src_dst_asic_and_duts['src_dut_index']] + src_test_port_ips = src_dut_port_ips[get_src_dst_asic_and_duts['src_asic_index']] + dst_dut_port_ips = testPortIps[get_src_dst_asic_and_duts['dst_dut_index']] + dst_test_port_ips = dst_dut_port_ips[get_src_dst_asic_and_duts['dst_asic_index']] + + if dstPorts is None: if dst_port_ids: pytest_assert( - len(set(testPortIds).intersection(set(dst_port_ids))) == len(set(dst_port_ids)), - "Dest port id passed in qos.yml not valid" + len(set(dst_test_port_ids).intersection(set(dst_port_ids))) == len(set(dst_port_ids)), + "Dest port id passed in qos.yml not valid" ) dstPorts = dst_port_ids - elif 
len(testPortIds) >= 4: + elif len(dst_test_port_ids) >= 4: dstPorts = [0, 2, 3] - elif len(testPortIds) == 3: + elif len(dst_test_port_ids) == 3: dstPorts = [0, 2, 2] else: dstPorts = [0, 0, 0] @@ -460,15 +604,15 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_ if srcPorts is None: if src_port_ids: pytest_assert( - len(set(testPortIds).intersection(set(src_port_ids))) == len(set(src_port_ids)), - "Source port id passed in qos.yml not valid" - ) + len(set(src_test_port_ids).intersection(set(src_port_ids))) == len(set(src_port_ids)), + "Source port id passed in qos.yml not valid" + ) # To verify ingress lossless speed/cable-length randomize the source port. srcPorts = [random.choice(src_port_ids)] else: srcPorts = [1] - pytest_assert(len(testPortIds) >= 2, "Provide at least 2 test ports") + pytest_assert(len(dst_test_port_ids) >= 1 and len(src_test_port_ids) >= 1, "Provide at least 2 test ports") logging.debug( "Test Port IDs:{} IPs:{}".format(testPortIds, testPortIps) ) @@ -481,36 +625,34 @@ def __buildTestPorts(self, request, testPortIds, testPortIps, src_port_ids, dst_ ) ) - - #TODO: Randomize port selection - dstPort = dstPorts[0] if dst_port_ids else testPortIds[dstPorts[0]] - dstVlan = testPortIps[dstPort]['vlan_id'] if 'vlan_id' in testPortIps[dstPort] else None - dstPort2 = dstPorts[1] if dst_port_ids else testPortIds[dstPorts[1]] - dstVlan2 = testPortIps[dstPort2]['vlan_id'] if 'vlan_id' in testPortIps[dstPort2] else None - dstPort3 = dstPorts[2] if dst_port_ids else testPortIds[dstPorts[2]] - dstVlan3 = testPortIps[dstPort3]['vlan_id'] if 'vlan_id' in testPortIps[dstPort3] else None - srcPort = srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]] - srcVlan = testPortIps[srcPort]['vlan_id'] if 'vlan_id' in testPortIps[srcPort] else None + # TODO: Randomize port selection + dstPort = dstPorts[0] if dst_port_ids else dst_test_port_ids[dstPorts[0]] + dstVlan = dst_test_port_ips[dstPort]['vlan_id'] if 'vlan_id' in 
dst_test_port_ips[dstPort] else None + dstPort2 = dstPorts[1] if dst_port_ids else dst_test_port_ids[dstPorts[1]] + dstVlan2 = dst_test_port_ips[dstPort2]['vlan_id'] if 'vlan_id' in dst_test_port_ips[dstPort2] else None + dstPort3 = dstPorts[2] if dst_port_ids else dst_test_port_ids[dstPorts[2]] + dstVlan3 = dst_test_port_ips[dstPort3]['vlan_id'] if 'vlan_id' in dst_test_port_ips[dstPort3] else None + srcPort = srcPorts[0] if src_port_ids else src_test_port_ids[srcPorts[0]] + srcVlan = src_test_port_ips[srcPort]['vlan_id'] if 'vlan_id' in src_test_port_ips[srcPort] else None return { "dst_port_id": dstPort, - "dst_port_ip": testPortIps[dstPort]['peer_addr'], + "dst_port_ip": dst_test_port_ips[dstPort]['peer_addr'], "dst_port_vlan": dstVlan, "dst_port_2_id": dstPort2, - "dst_port_2_ip": testPortIps[dstPort2]['peer_addr'], + "dst_port_2_ip": dst_test_port_ips[dstPort2]['peer_addr'], "dst_port_2_vlan": dstVlan2, 'dst_port_3_id': dstPort3, - "dst_port_3_ip": testPortIps[dstPort3]['peer_addr'], + "dst_port_3_ip": dst_test_port_ips[dstPort3]['peer_addr'], "dst_port_3_vlan": dstVlan3, - "src_port_id": srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]], - "src_port_ip": testPortIps[srcPorts[0] if src_port_ids else testPortIds[srcPorts[0]]]["peer_addr"], + "src_port_id": srcPort, + "src_port_ip": src_test_port_ips[srcPorts[0] if src_port_ids else src_test_port_ids[srcPorts[0]]]["peer_addr"], "src_port_vlan": srcVlan } @pytest.fixture(scope='class', autouse=True) def dutConfig( - self, request, duthosts, enum_rand_one_per_hwsku_frontend_hostname, - enum_frontend_asic_index, lower_tor_host, tbinfo, dualtor_ports, dut_qos_maps - ): + self, request, duthosts, get_src_dst_asic_and_duts, + lower_tor_host, tbinfo, dualtor_ports_for_duts, dut_qos_maps): # noqa F811 """ Build DUT host config pertaining to QoS SAI tests @@ -522,16 +664,16 @@ def dutConfig( dutConfig (dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, and test ports """ - if 
'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) - dut_asic = duthost.asic_instance(enum_frontend_asic_index) - dutLagInterfaces = [] + """ + Below are dictionaries with key being dut_index and value a dictionary with key asic_index + Example for 2 DUTs with 2 asics each + { 0: { 0: , 1: }, 1: { 0: , 1: }} + """ dutPortIps = {} testPortIps = {} + testPortIds = {} + dualTorPortIndexes = {} uplinkPortIds = [] uplinkPortIps = [] uplinkPortNames = [] @@ -539,54 +681,62 @@ def dutConfig( downlinkPortIps = [] downlinkPortNames = [] - mgFacts = duthost.get_extended_minigraph_facts(tbinfo) + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + src_dut = get_src_dst_asic_and_duts['src_dut'] + src_mgFacts = src_dut.get_extended_minigraph_facts(tbinfo) topo = tbinfo["topo"]["name"] - dualTorPortIndexes = [] - testPortIds = [] # LAG ports in T1 TOPO need to be removed in Mellanox devices - if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost): + if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(src_dut): + # Only single asic is supported for this scenario, so use src_dut and src_asic - which will be the same + # as dst_dut and dst_asic pytest_assert( - not duthost.sonichost.is_multi_asic, "Fixture not supported on T0 multi ASIC" + not src_dut.sonichost.is_multi_asic, "Fixture not supported on T0 multi ASIC" ) - for _, lag in mgFacts["minigraph_portchannels"].items(): + dutLagInterfaces = [] + testPortIds[src_dut_index] = {} + for _, lag in src_mgFacts["minigraph_portchannels"].items(): for intf in lag["members"]: - dutLagInterfaces.append(mgFacts["minigraph_ptf_indices"][intf]) + dutLagInterfaces.append(src_mgFacts["minigraph_ptf_indices"][intf]) - testPortIds = set(mgFacts["minigraph_ptf_indices"][port] - for port in 
mgFacts["minigraph_ports"].keys()) - testPortIds -= set(dutLagInterfaces) - if isMellanoxDevice(duthost): + testPortIds[src_dut_index][src_asic_index] = set(src_mgFacts["minigraph_ptf_indices"][port] + for port in src_mgFacts["minigraph_ports"].keys()) + testPortIds[src_dut_index][src_asic_index] -= set(dutLagInterfaces) + if isMellanoxDevice(src_dut): # The last port is used for up link from DUT switch - testPortIds -= {len(mgFacts["minigraph_ptf_indices"]) - 1} - testPortIds = sorted(testPortIds) - pytest_require(len(testPortIds) != 0, "Skip test since no ports are available for testing") + testPortIds[src_dut_index][src_asic_index] -= {len(src_mgFacts["minigraph_ptf_indices"]) - 1} + testPortIds[src_dut_index][src_asic_index] = sorted(testPortIds[src_dut_index][src_asic_index]) + pytest_require(len(testPortIds[src_dut_index][src_asic_index]) != 0, "Skip test since no ports are available for testing") # get current DUT port IPs - dutPortIps = {} + dutPortIps[src_dut_index] = {} + dutPortIps[src_dut_index][src_asic_index] = {} + dualTorPortIndexes[src_dut_index] = {} + dualTorPortIndexes[src_dut_index][src_asic_index] = {} if 'backend' in topo: - intf_map = mgFacts["minigraph_vlan_sub_interfaces"] + intf_map = src_mgFacts["minigraph_vlan_sub_interfaces"] else: - intf_map = mgFacts["minigraph_interfaces"] + intf_map = src_mgFacts["minigraph_interfaces"] - use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(duthost, dut_qos_maps) + use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(src_dut, dut_qos_maps) for portConfig in intf_map: intf = portConfig["attachto"].split(".")[0] if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4: - portIndex = mgFacts["minigraph_ptf_indices"][intf] - if portIndex in testPortIds: + portIndex = src_mgFacts["minigraph_ptf_indices"][intf] + if portIndex in testPortIds[src_dut_index][src_asic_index]: portIpMap = {'peer_addr': portConfig["peer_addr"]} if 'vlan' in portConfig: 
portIpMap['vlan_id'] = portConfig['vlan'] - dutPortIps.update({portIndex: portIpMap}) - if intf in dualtor_ports: - dualTorPortIndexes.append(portIndex) + dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) + if intf in dualtor_ports_for_duts: + dualTorPortIndexes[src_dut_index][src_asic_index].append(portIndex) # If the leaf router is using separated DSCP_TO_TC_MAP on uplink/downlink ports. # we also need to test them separately # for mellanox device, we run it on t1 topo mocked by ptf32 topo - if use_separated_upkink_dscp_tc_map and isMellanoxDevice(duthost): - neighName = mgFacts["minigraph_neighbors"].get(intf, {}).get("name", "").lower() + if use_separated_upkink_dscp_tc_map and isMellanoxDevice(src_dut): + neighName = src_mgFacts["minigraph_neighbors"].get(intf, {}).get("name", "").lower() if 't0' in neighName: downlinkPortIds.append(portIndex) downlinkPortIps.append(portConfig["peer_addr"]) @@ -596,77 +746,128 @@ def dutConfig( uplinkPortIps.append(portConfig["peer_addr"]) uplinkPortNames.append(intf) - testPortIps = self.__assignTestPortIps(mgFacts) + testPortIps[src_dut_index] = {} + testPortIps[src_dut_index][src_asic_index] = self.__assignTestPortIps(src_mgFacts) + + # restore currently assigned IPs + if len(dutPortIps[src_dut_index][src_asic_index]) != 0: + testPortIps.update(dutPortIps) elif topo in self.SUPPORTED_T1_TOPOS: - use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(duthost, dut_qos_maps) - for iface,addr in dut_asic.get_active_ip_interfaces(tbinfo).items(): - vlan_id = None - if iface.startswith("Ethernet"): - portName = iface - if "." 
in iface: - portName, vlan_id = iface.split(".") - portIndex = mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"]} - if vlan_id is not None: - portIpMap['vlan_id'] = vlan_id - dutPortIps.update({portIndex: portIpMap}) - elif iface.startswith("PortChannel"): - portName = next( - iter(mgFacts["minigraph_portchannels"][iface]["members"]) - ) - portIndex = mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"]} - dutPortIps.update({portIndex: portIpMap}) - # If the leaf router is using separated DSCP_TO_TC_MAP on uplink/downlink ports. - # we also need to test them separately - if use_separated_upkink_dscp_tc_map: - neighName = mgFacts["minigraph_neighbors"].get(portName, {}).get("name", "").lower() - if 't0' in neighName: - downlinkPortIds.append(portIndex) - downlinkPortIps.append(addr["peer_ipv4"]) - downlinkPortNames.append(portName) - elif 't2' in neighName: - uplinkPortIds.append(portIndex) - uplinkPortIps.append(addr["peer_ipv4"]) - uplinkPortNames.append(portName) - - testPortIds = sorted(dutPortIps.keys()) + + # T1 is supported only for 'single_asic' or 'single_dut_multi_asic'. + # So use src_dut as the dut + use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(src_dut, dut_qos_maps) + dutPortIps[src_dut_index] = {} + testPortIds[src_dut_index] = {} + for dut_asic in get_src_dst_asic_and_duts['all_asics']: + dutPortIps[src_dut_index][dut_asic.asic_index] = {} + for iface,addr in dut_asic.get_active_ip_interfaces(tbinfo).items(): + vlan_id = None + if iface.startswith("Ethernet"): + portName = iface + if "." 
in iface: + portName, vlan_id = iface.split(".") + portIndex = src_mgFacts["minigraph_ptf_indices"][portName] + portIpMap = {'peer_addr': addr["peer_ipv4"]} + if vlan_id is not None: + portIpMap['vlan_id'] = vlan_id + dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap}) + elif iface.startswith("PortChannel"): + portName = next( + iter(src_mgFacts["minigraph_portchannels"][iface]["members"]) + ) + portIndex = src_mgFacts["minigraph_ptf_indices"][portName] + portIpMap = {'peer_addr': addr["peer_ipv4"]} + dutPortIps[src_dut_index][dut_asic.asic_index].update({portIndex: portIpMap}) + # If the leaf router is using separated DSCP_TO_TC_MAP on uplink/downlink ports. + # we also need to test them separately + if use_separated_upkink_dscp_tc_map: + neighName = src_mgFacts["minigraph_neighbors"].get(portName, {}).get("name", "").lower() + if 't0' in neighName: + downlinkPortIds.append(portIndex) + downlinkPortIps.append(addr["peer_ipv4"]) + downlinkPortNames.append(portName) + elif 't2' in neighName: + uplinkPortIds.append(portIndex) + uplinkPortIps.append(addr["peer_ipv4"]) + uplinkPortNames.append(portName) + + testPortIds[src_dut_index][dut_asic.asic_index] = sorted(dutPortIps[src_dut_index][dut_asic.asic_index].keys()) + + # Need to fix this + testPortIps[src_dut_index] = {} + testPortIps[src_dut_index][src_asic_index] = self.__assignTestPortIps(src_mgFacts) + + # restore currently assigned IPs + if len(dutPortIps[src_dut_index][src_asic_index]) != 0: + testPortIps.update(dutPortIps) elif tbinfo["topo"]["type"] == "t2": - for iface,addr in dut_asic.get_active_ip_interfaces(tbinfo).items(): - vlan_id = None + src_asic = get_src_dst_asic_and_duts['src_asic'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + dst_asic = get_src_dst_asic_and_duts['dst_asic'] + + # Lets get data for the src dut and src asic + dutPortIps[src_dut_index] = {} + testPortIds[src_dut_index] = {} + dutPortIps[src_dut_index][src_asic_index] = {} + active_ips = 
src_asic.get_active_ip_interfaces(tbinfo) + for iface,addr in active_ips.items(): if iface.startswith("Ethernet") and ("Ethernet-Rec" not in iface): - if "." in iface: - iface, vlan_id = iface.split(".") - portIndex = mgFacts["minigraph_ptf_indices"][iface] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface } - if vlan_id is not None: - portIpMap['vlan_id'] = vlan_id - dutPortIps.update({portIndex: portIpMap}) + portIndex = src_mgFacts["minigraph_ptf_indices"][iface] + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface} + dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) elif iface.startswith("PortChannel"): portName = next( - iter(mgFacts["minigraph_portchannels"][iface]["members"]) + iter(src_mgFacts["minigraph_portchannels"][iface]["members"]) ) - portIndex = mgFacts["minigraph_ptf_indices"][portName] - portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName } - dutPortIps.update({portIndex: portIpMap}) - - testPortIds = sorted(dutPortIps.keys()) - - # restore currently assigned IPs - testPortIps.update(dutPortIps) + portIndex = src_mgFacts["minigraph_ptf_indices"][portName] + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName} + dutPortIps[src_dut_index][src_asic_index].update({portIndex: portIpMap}) + + testPortIds[src_dut_index][src_asic_index] = sorted(dutPortIps[src_dut_index][src_asic_index].keys()) + + if dst_asic != src_asic: + # Dealing with different asic + dst_dut = get_src_dst_asic_and_duts['dst_dut'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + if dst_dut_index != src_dut_index: + dst_mgFacts = dst_dut.get_extended_minigraph_facts(tbinfo) + dutPortIps[dst_dut_index] = {} + testPortIds[dst_dut_index] = {} + else: + dst_mgFacts = src_mgFacts + dutPortIps[dst_dut_index][dst_asic_index] = {} + active_ips = dst_asic.get_active_ip_interfaces(tbinfo) + for iface, addr in active_ips.items(): + if iface.startswith("Ethernet") and ("Ethernet-Rec" not in iface): + portIndex = 
dst_mgFacts["minigraph_ptf_indices"][iface] + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': iface} + dutPortIps[dst_dut_index][dst_asic_index].update({portIndex: portIpMap}) + elif iface.startswith("PortChannel"): + portName = next( + iter(dst_mgFacts["minigraph_portchannels"][iface]["members"]) + ) + portIndex = dst_mgFacts["minigraph_ptf_indices"][portName] + portIpMap = {'peer_addr': addr["peer_ipv4"], 'port': portName} + dutPortIps[dst_dut_index][dst_asic_index].update({portIndex: portIpMap}) + + testPortIds[dst_dut_index][dst_asic_index] = sorted(dutPortIps[dst_dut_index][dst_asic_index].keys()) + + # restore currently assigned IPs + testPortIps.update(dutPortIps) qosConfigs = {} with open(r"qos/files/qos.yml") as file: qosConfigs = yaml.load(file, Loader=yaml.FullLoader) - - vendor = duthost.facts["asic_type"] - hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] + # Assuming the same chipset for all DUTs so can use src_dut to get asic type + vendor = src_dut.facts["asic_type"] + hostvars = src_dut.host.options['variable_manager']._hostvars[src_dut.hostname] dutAsic = None for asic in self.SUPPORTED_ASIC_LIST: vendorAsic = "{0}_{1}_hwskus".format(vendor, asic) - if vendorAsic in hostvars.keys() and mgFacts["minigraph_hwsku"] in hostvars[vendorAsic]: + if vendorAsic in hostvars.keys() and src_mgFacts["minigraph_hwsku"] in hostvars[vendorAsic]: dutAsic = asic break @@ -686,10 +887,10 @@ def dutConfig( src_port_ids = None dst_port_ids = None try: - if "src_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]: + if "src_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]: src_port_ids = qosConfigs['qos_params'][dutAsic][dutTopo]["src_port_ids"] - if "dst_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]: + if "dst_port_ids" in qosConfigs['qos_params'][dutAsic][dutTopo]: dst_port_ids = qosConfigs['qos_params'][dutAsic][dutTopo]["dst_port_ids"] except KeyError: pass @@ -698,7 +899,7 @@ def dutConfig( if dualTor: 
testPortIds = dualTorPortIndexes - testPorts = self.__buildTestPorts(request, testPortIds, testPortIps, src_port_ids, dst_port_ids) + testPorts = self.__buildTestPorts(request, testPortIds, testPortIps, src_port_ids, dst_port_ids, get_src_dst_asic_and_duts) # Update the uplink/downlink ports to testPorts testPorts.update({ "uplink_port_ids": uplinkPortIds, @@ -707,16 +908,20 @@ def dutConfig( "downlink_port_ids": downlinkPortIds, "downlink_port_ips": downlinkPortIps, "downlink_port_names": downlinkPortNames - }) + }) dutinterfaces = {} if tbinfo["topo"]["type"] == "t2": - for ptf_port, ptf_val in dutPortIps.items(): - dutinterfaces[ptf_port] = ptf_val['port'] + #dutportIps={0: {0: {0: {'peer_addr': u'10.0.0.1', 'port': u'Ethernet8'}, 2: {'peer_addr': u'10.0.0.5', 'port': u'Ethernet17'}}}} + # { 0: 'Ethernet8', 2: 'Ethernet17' } + for dut_index,dut_val in dutPortIps.items(): + for asic_index,asic_val in dut_val.items(): + for ptf_port, ptf_val in asic_val.items(): + dutinterfaces[ptf_port] = ptf_val['port'] else: - for port, index in mgFacts["minigraph_ptf_indices"].items(): - if 'Ethernet-Rec' not in port and 'Ethernet-IB' not in port: - dutinterfaces[index] = port + dutinterfaces = { + index: port for port, index in src_mgFacts["minigraph_ptf_indices"].items() + } yield { "dutInterfaces": dutinterfaces, @@ -724,33 +929,28 @@ def dutConfig( "testPortIps": testPortIps, "testPorts": testPorts, "qosConfigs": qosConfigs, - "dutAsic" : dutAsic, - "dutTopo" : dutTopo, - "dutInstance" : duthost, - "dualTor" : request.config.getoption("--qos_dual_tor"), - "dualTorScenario" : len(dualtor_ports) != 0 + "dutAsic": dutAsic, + "dutTopo": dutTopo, + "srcDutInstance" : src_dut, + "dstDutInstance": get_src_dst_asic_and_duts['dst_dut'], + "dualTor": request.config.getoption("--qos_dual_tor"), + "dualTorScenario": len(dualtor_ports_for_duts) != 0 } @pytest.fixture(scope='class') - def ssh_tunnel_to_syncd_rpc( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, 
enum_frontend_asic_index, - swapSyncd, tbinfo, lower_tor_host - ): - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) - dut_asic.create_ssh_tunnel_sai_rpc() + def ssh_tunnel_to_syncd_rpc(self, duthosts, get_src_dst_asic_and_duts, swapSyncd_on_selected_duts, tbinfo, lower_tor_host): + all_asics = get_src_dst_asic_and_duts['all_asics'] + + for a_asic in all_asics: + a_asic.create_ssh_tunnel_sai_rpc() yield - dut_asic.remove_ssh_tunnel_sai_rpc() + for a_asic in all_asics: + a_asic.remove_ssh_tunnel_sai_rpc() @pytest.fixture(scope='class') - def updateIptables( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, swapSyncd, tbinfo, lower_tor_host - ): + def updateIptables(self, duthosts, get_src_dst_asic_and_duts, swapSyncd_on_selected_duts, tbinfo, lower_tor_host): """ Update iptables on DUT host with drop rule for BGP SYNC packets @@ -761,29 +961,27 @@ def updateIptables( Returns: None """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) + all_asics = get_src_dst_asic_and_duts['all_asics'] - ipVersions = [{"ip_version": "ipv4"}, {"ip_version": "ipv6"}] + ipVersions = [{"ip_version": "ipv4"}, {"ip_version": "ipv6"}] logger.info("Add ip[6]tables rule to drop BGP SYN Packet from peer so that we do not ACK back") for ipVersion in ipVersions: - dut_asic.bgp_drop_rule(state="present", **ipVersion) + for a_asic in all_asics: + a_asic.bgp_drop_rule(state="present", **ipVersion) yield logger.info("Remove ip[6]tables rule to drop BGP SYN Packet from Peer") for ipVersion in ipVersions: - dut_asic.bgp_drop_rule(state="absent", **ipVersion) + for a_asic in all_asics: + a_asic.bgp_drop_rule(state="absent", **ipVersion) @pytest.fixture(scope='class') 
def stopServices( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, - swapSyncd, enable_container_autorestart, disable_container_autorestart, get_mux_status, - tbinfo, upper_tor_host, lower_tor_host, toggle_all_simulator_ports): # noqa F811 + self, duthosts, get_src_dst_asic_and_duts, + swapSyncd_on_selected_duts, enable_container_autorestart, disable_container_autorestart, get_mux_status, + tbinfo, upper_tor_host, lower_tor_host, toggle_all_simulator_ports): # noqa F811 """ Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start @@ -794,13 +992,14 @@ def stopServices( Returns: None """ + src_asic = get_src_dst_asic_and_duts['src_asic'] + src_dut = get_src_dst_asic_and_duts['src_dut'] + dst_asic = get_src_dst_asic_and_duts['dst_asic'] + dst_dut = get_src_dst_asic_and_duts['dst_dut'] + if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host duthost_upper = upper_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) def updateDockerService(host, docker="", action="", service=""): """ Helper function to update docker services @@ -833,59 +1032,79 @@ def updateDockerService(host, docker="", action="", service=""): validate_check_result(check_result, duthosts, get_mux_status) try: - duthost.shell("ls %s" % file) - duthost.shell("sudo cp {} {}".format(file,backup_file)) - duthost.shell("sudo rm {}".format(file)) - duthost.shell("sudo touch {}".format(file)) + lower_tor_host.shell("ls %s" % file) + lower_tor_host.shell("sudo cp {} {}".format(file,backup_file)) + lower_tor_host.shell("sudo rm {}".format(file)) + lower_tor_host.shell("sudo touch {}".format(file)) except: pytest.skip('file {} not found'.format(file)) duthost_upper.shell('sudo config feature state mux disabled') - duthost.shell('sudo config feature state mux disabled') - - services = [ - {"docker": dut_asic.get_docker_name("lldp"), "service": "lldp-syncd"}, - 
{"docker": dut_asic.get_docker_name("lldp"), "service": "lldpd"}, - {"docker": dut_asic.get_docker_name("bgp"), "service": "bgpd"}, - {"docker": dut_asic.get_docker_name("bgp"), "service": "bgpmon"}, - {"docker": dut_asic.get_docker_name("radv"), "service": "radvd"}, - {"docker": dut_asic.get_docker_name("swss"), "service": "arp_update"} + lower_tor_host.shell('sudo config feature state mux disabled') + + src_services = [ + {"docker": src_asic.get_docker_name("lldp"), "service": "lldp-syncd"}, + {"docker": src_asic.get_docker_name("lldp"), "service": "lldpd"}, + {"docker": src_asic.get_docker_name("bgp"), "service": "bgpd"}, + {"docker": src_asic.get_docker_name("bgp"), "service": "bgpmon"}, + {"docker": src_asic.get_docker_name("radv"), "service": "radvd"}, + {"docker": src_asic.get_docker_name("swss"), "service": "radvd"} ] + dst_services = [] + if src_asic != dst_asic: + dst_services = [ + {"docker": dst_asic.get_docker_name("lldp"), "service": "lldp-syncd"}, + {"docker": dst_asic.get_docker_name("lldp"), "service": "lldpd"}, + {"docker": dst_asic.get_docker_name("bgp"), "service": "bgpd"}, + {"docker": dst_asic.get_docker_name("bgp"), "service": "bgpmon"}, + {"docker": dst_asic.get_docker_name("radv"), "service": "radvd"}, + {"docker": dst_asic.get_docker_name("swss"), "service": "radvd"}, + ] feature_list = ['lldp', 'bgp', 'syncd', 'swss'] if 'dualtor' in tbinfo['topo']['name']: disable_container_autorestart(duthost_upper, testcase="test_qos_sai", feature_list=feature_list) - disable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list) - for service in services: - updateDockerService(duthost, action="stop", **service) + disable_container_autorestart(src_dut, testcase="test_qos_sai", feature_list=feature_list) + for service in src_services: + updateDockerService(src_dut, action="stop", **service) + if src_asic != dst_asic: + disable_container_autorestart(dst_dut, testcase="test_qos_sai", feature_list=feature_list) + for service in 
dst_services: + updateDockerService(dst_dut, action="stop", **service) yield - for service in services: - updateDockerService(duthost, action="start", **service) + for service in src_services: + updateDockerService(src_dut, action="start", **service) + if src_asic != dst_asic: + for service in dst_services: + updateDockerService(dst_dut, action="start", **service) """ Start mux conatiner for dual ToR """ if 'dualtor' in tbinfo['topo']['name']: try: - duthost.shell("ls %s" % backup_file) - duthost.shell("sudo cp {} {}".format(backup_file,file)) - duthost.shell("sudo chmod +x {}".format(file)) - duthost.shell("sudo rm {}".format(backup_file)) + + lower_tor_host.shell("ls %s" % backup_file) + lower_tor_host.shell("sudo cp {} {}".format(backup_file,file)) + lower_tor_host.shell("sudo chmod +x {}".format(file)) + lower_tor_host.shell("sudo rm {}".format(backup_file)) except: pytest.skip('file {} not found'.format(backup_file)) - duthost.shell('sudo config feature state mux enabled') - duthost_upper.shell('sudo config feature state mux enabled') + lower_tor_host.shell('sudo config feature state mux enabled') + lower_tor_host.shell('sudo config feature state mux enabled') logger.info("Start mux container for dual ToR testbed") - enable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list) + enable_container_autorestart(src_dut, testcase="test_qos_sai", feature_list=feature_list) + if src_asic != dst_asic: + enable_container_autorestart(dst_dut, testcase="test_qos_sai", feature_list=feature_list) if 'dualtor' in tbinfo['topo']['name']: enable_container_autorestart(duthost_upper, testcase="test_qos_sai", feature_list=feature_list) @pytest.fixture(autouse=True) - def updateLoganalyzerExceptions(self, enum_rand_one_per_hwsku_frontend_hostname, loganalyzer): + def updateLoganalyzerExceptions(self, get_src_dst_asic_and_duts, loganalyzer): """ Update loganalyzer ignore regex list @@ -918,13 +1137,14 @@ def updateLoganalyzerExceptions(self, 
enum_rand_one_per_hwsku_frontend_hostname, ".*WARNING syncd#SDK:.*sai_set_attribute: Failed attribs check, key:Switch ID.*", ".*WARNING syncd#SDK:.*check_rate: Set max rate to 0.*" ] - loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex) + for a_dut in get_src_dst_asic_and_duts['all_duts']: + loganalyzer[a_dut.hostname].ignore_regex.extend(ignoreRegex) yield @pytest.fixture(scope='class', autouse=True) def disablePacketAging( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, stopServices + self, duthosts, get_src_dst_asic_and_duts, stopServices ): """ disable packet aging on DUT host @@ -936,33 +1156,34 @@ def disablePacketAging( Returns: None """ - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + for duthost in get_src_dst_asic_and_duts['all_duts']: + if isMellanoxDevice(duthost): + logger.info("Disable Mellanox packet aging") + duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp") + duthost.command("docker cp /tmp/packets_aging.py syncd:/") + duthost.command("docker exec syncd python /packets_aging.py disable") - if isMellanoxDevice(duthost): - logger.info("Disable Mellanox packet aging") - duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp") - duthost.command("docker cp /tmp/packets_aging.py syncd:/") - duthost.command("docker exec syncd python /packets_aging.py disable") + yield - yield - - if isMellanoxDevice(duthost): - logger.info("Enable Mellanox packet aging") - duthost.command("docker exec syncd python /packets_aging.py enable") - duthost.command("docker exec syncd rm -rf /packets_aging.py") + if isMellanoxDevice(duthost): + logger.info("Enable Mellanox packet aging") + duthost.command("docker exec syncd python /packets_aging.py enable") + duthost.command("docker exec syncd rm -rf /packets_aging.py") def dutArpProxyConfig(self, duthost): # so far, only record ARP proxy config to logging for debug purpose - vlanInterface = {} - try: - vlanInterface = 
json.loads(duthost.shell('sonic-cfggen -d --var-json "VLAN_INTERFACE"')['stdout']) - except: - logger.info('Failed to read vlan interface config') - if not vlanInterface: - return - for key, value in vlanInterface.items(): - if 'proxy_arp' in value: - logger.info('ARP proxy is {} on {}'.format(value['proxy_arp'], key)) + for a_asic in duthost.asics: + vlanInterface = {} + try: + sonic_cfgen_cmd = 'sonic-cfggen {} -d --var-json "VLAN_INTERFACE"'.format(a_asic.cli_ns_option) + vlanInterface = json.loads(duthost.shell(sonic_cfgen_cmd)['stdout']) + except: + logger.info('Failed to read vlan interface config') + if not vlanInterface: + return + for key, value in vlanInterface.items(): + if 'proxy_arp' in value: + logger.info('ARP proxy is {} on {}'.format(value['proxy_arp'], key)) def dutBufferConfig(self, duthost, dut_asic): bufferConfig = {} @@ -986,7 +1207,7 @@ def dutBufferConfig(self, duthost, dut_asic): @pytest.fixture(scope='class', autouse=True) def dutQosConfig( - self, duthosts, enum_frontend_asic_index, enum_rand_one_per_hwsku_frontend_hostname, + self, duthosts, get_src_dst_asic_and_duts, dutConfig, ingressLosslessProfile, ingressLossyProfile, egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize, tbinfo, lower_tor_host @@ -1002,12 +1223,9 @@ def dutQosConfig( Returns: QoSConfig (dict): Map containing DUT host QoS configuration """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + duthost = get_src_dst_asic_and_duts['src_dut'] + dut_asic = get_src_dst_asic_and_duts['src_asic'] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) mgFacts = duthost.get_extended_minigraph_facts(tbinfo) pytest_assert("minigraph_hwsku" in mgFacts, "Could not find DUT SKU") @@ -1043,13 +1261,17 @@ def dutQosConfig( egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize, - dutConfig["dualTor"] - ) + dutConfig["dualTor"], + 
get_src_dst_asic_and_duts['src_dut_index'], + get_src_dst_asic_and_duts['src_asic_index'], + get_src_dst_asic_and_duts['dst_dut_index'], + get_src_dst_asic_and_duts['dst_asic_index'] + ) qosParams = qpm.run() elif 'broadcom' in duthost.facts['asic_type'].lower(): if 'platform_asic' in duthost.facts and duthost.facts['platform_asic'] == 'broadcom-dnx': - logger.info("THDI_BUFFER_CELL_LIMIT_SP is not valid for broadcom DNX - ignore dynamic buffer config") + logger.info ("THDI_BUFFER_CELL_LIMIT_SP is not valid for broadcom DNX - ignore dynamic buffer config") qosParams = qosConfigs['qos_params'][dutAsic][dutTopo] else: bufferConfig = self.dutBufferConfig(duthost, dut_asic) @@ -1112,8 +1334,7 @@ def dutQosConfig( @pytest.fixture(scope='class') def releaseAllPorts( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, dutTestParams, - updateIptables, ssh_tunnel_to_syncd_rpc + self, duthosts, ptfhost, dutTestParams, updateIptables, ssh_tunnel_to_syncd_rpc ): """ Release all paused ports prior to running QoS SAI test cases @@ -1164,7 +1385,7 @@ def __deleteTmpSwitchConfig(self, duthost): duthost.file(path=file["path"], state="absent") @pytest.fixture(scope='class', autouse=True) - def handleFdbAging(self, tbinfo, duthosts, lower_tor_host, enum_rand_one_per_hwsku_frontend_hostname): + def handleFdbAging(self, tbinfo, duthosts, lower_tor_host, get_src_dst_asic_and_duts): """ Disable FDB aging and reenable at the end of tests @@ -1177,35 +1398,32 @@ def handleFdbAging(self, tbinfo, duthosts, lower_tor_host, enum_rand_one_per_hws Returns: None """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] fdbAgingTime = 0 - self.__deleteTmpSwitchConfig(duthost) - duthost.docker_copy_from_asic("swss", "/etc/swss/config.d/switch.json", "/tmp") - duthost.replace( - dest='/tmp/switch.json', - regexp='"fdb_aging_time": ".*"', - replace='"fdb_aging_time": "{0}"'.format(fdbAgingTime), - 
backup=True - ) - duthost.docker_copy_to_all_asics("swss", "/tmp/switch.json", "/etc/swss/config.d/switch.json") - self.__loadSwssConfig(duthost) + for duthost in get_src_dst_asic_and_duts['all_duts']: + self.__deleteTmpSwitchConfig(duthost) + duthost.docker_copy_from_asic("swss", "/etc/swss/config.d/switch.json", "/tmp") + duthost.replace( + dest='/tmp/switch.json', + regexp='"fdb_aging_time": ".*"', + replace='"fdb_aging_time": "{0}"'.format(fdbAgingTime), + backup=True + ) + duthost.docker_copy_to_all_asics("swss", "/tmp/switch.json", "/etc/swss/config.d/switch.json") + self.__loadSwssConfig(duthost) yield - - result = duthost.find(path=["/tmp"], patterns=["switch.json.*"]) - if result["matched"] > 0: - src = result["files"][0]["path"] - duthost.docker_copy_to_all_asics("swss", src, "/etc/swss/config.d/switch.json") - self.__loadSwssConfig(duthost) - self.__deleteTmpSwitchConfig(duthost) + for duthost in get_src_dst_asic_and_duts['all_duts']: + result = duthost.find(path=["/tmp"], patterns=["switch.json.*"]) + if result["matched"] > 0: + src = result["files"][0]["path"] + duthost.docker_copy_to_all_asics("swss", src, "/etc/swss/config.d/switch.json") + self.__loadSwssConfig(duthost) + self.__deleteTmpSwitchConfig(duthost) @pytest.fixture(scope='class', autouse=True) def populateArpEntries( - self, duthosts, enum_frontend_asic_index, enum_rand_one_per_hwsku_frontend_hostname, + self, duthosts, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, releaseAllPorts, handleFdbAging, tbinfo, lower_tor_host ): """ @@ -1225,12 +1443,9 @@ def populateArpEntries( Raises: RunAnsibleModuleFail if ptf test fails """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) + dut_asic = get_src_dst_asic_and_duts['src_asic'] + duthost = get_src_dst_asic_and_duts['src_dut'] dut_asic.command('sonic-clear fdb all') 
dut_asic.command('sonic-clear arp') @@ -1241,10 +1456,11 @@ def populateArpEntries( elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS: saiQosTest = "sai_qos_tests.ARPpopulatePTF" else: - result = dut_asic.command("arp -n") - pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(duthost.hostname)) - if result["stdout"].find("incomplete") == -1: - saiQosTest = "sai_qos_tests.ARPpopulate" + for dut_asic in get_src_dst_asic_and_duts['all_asics']: + result = dut_asic.command("arp -n") + pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(dut_asic.sonichost.hostname)) + if result["stdout"].find("incomplete") == -1: + saiQosTest = "sai_qos_tests.ARPpopulate" if saiQosTest: testParams = dutTestParams["basicParams"] @@ -1254,21 +1470,18 @@ def populateArpEntries( ) @pytest.fixture(scope='class', autouse=True) - def dut_disable_ipv6(self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, lower_tor_host): - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - - duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=1") + def dut_disable_ipv6(self, duthosts, get_src_dst_asic_and_duts, tbinfo, lower_tor_host): + for duthost in get_src_dst_asic_and_duts['all_duts']: + duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=1") yield - duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=0") + + for duthost in get_src_dst_asic_and_duts['all_duts']: + duthost.shell("sysctl -w net.ipv6.conf.all.disable_ipv6=0") @pytest.fixture(scope='class', autouse=True) def sharedHeadroomPoolSize( - self, request, duthosts, enum_frontend_asic_index, - enum_rand_one_per_hwsku_frontend_hostname, tbinfo, lower_tor_host + self, request, duthosts, get_src_dst_asic_and_duts, tbinfo, lower_tor_host ): """ Retreives shared headroom pool size @@ -1281,20 +1494,14 @@ def sharedHeadroomPoolSize( size: shared headroom pool size none if it is not defined """ - 
if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - yield self.__getSharedHeadroomPoolSize( request, - duthost.asic_instance(enum_frontend_asic_index) + get_src_dst_asic_and_duts['src_asic'] ) @pytest.fixture(scope='class', autouse=True) def ingressLosslessProfile( - self, request, duthosts, enum_frontend_asic_index, - enum_rand_one_per_hwsku_frontend_hostname, dutConfig, tbinfo, lower_tor_host, dualtor_ports + self, request, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host, dualtor_ports_for_duts ): """ Retreives ingress lossless profile @@ -1308,15 +1515,12 @@ def ingressLosslessProfile( Returns: ingressLosslessProfile (dict): Map of ingress lossless buffer profile attributes """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) + dut_asic = get_src_dst_asic_and_duts['src_asic'] + duthost = get_src_dst_asic_and_duts['src_dut'] srcport = dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]] - if srcport in dualtor_ports: + if srcport in dualtor_ports_for_duts: pgs = "2-4" else: pgs = "3-4" @@ -1332,8 +1536,7 @@ def ingressLosslessProfile( @pytest.fixture(scope='class', autouse=True) def ingressLossyProfile( - self, request, duthosts, enum_frontend_asic_index, - enum_rand_one_per_hwsku_frontend_hostname, dutConfig, tbinfo, lower_tor_host + self, request, duthosts, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host ): """ Retreives ingress lossy profile @@ -1347,12 +1550,8 @@ def ingressLossyProfile( Returns: ingressLossyProfile (dict): Map of ingress lossy buffer profile attributes """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - - dut_asic = duthost.asic_instance(enum_frontend_asic_index) + duthost 
= get_src_dst_asic_and_duts['src_dut'] + dut_asic = get_src_dst_asic_and_duts['src_asic'] yield self.__getBufferProfile( request, dut_asic, @@ -1364,8 +1563,7 @@ def ingressLossyProfile( @pytest.fixture(scope='class', autouse=True) def egressLosslessProfile( - self, request, duthosts, enum_frontend_asic_index, - enum_rand_one_per_hwsku_frontend_hostname, dutConfig, tbinfo, lower_tor_host, dualtor_ports + self, request, duthosts, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host, dualtor_ports_for_duts ): """ Retreives egress lossless profile @@ -1379,15 +1577,12 @@ def egressLosslessProfile( Returns: egressLosslessProfile (dict): Map of egress lossless buffer profile attributes """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + duthost = get_src_dst_asic_and_duts['src_dut'] + dut_asic = get_src_dst_asic_and_duts['src_asic'] - dut_asic = duthost.asic_instance(enum_frontend_asic_index) srcport = dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]] - if srcport in dualtor_ports: + if srcport in dualtor_ports_for_duts: queues = "2-4" else: queues = "3-4" @@ -1403,8 +1598,7 @@ def egressLosslessProfile( @pytest.fixture(scope='class', autouse=True) def egressLossyProfile( - self, request, duthosts, enum_frontend_asic_index, - enum_rand_one_per_hwsku_frontend_hostname, dutConfig, tbinfo, lower_tor_host, dualtor_ports + self, request, duthosts, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host, dualtor_ports_for_duts ): """ Retreives egress lossy profile @@ -1418,15 +1612,12 @@ def egressLossyProfile( Returns: egressLossyProfile (dict): Map of egress lossy buffer profile attributes """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + duthost = get_src_dst_asic_and_duts['src_dut'] + dut_asic = get_src_dst_asic_and_duts['src_asic'] - dut_asic = 
duthost.asic_instance(enum_frontend_asic_index) srcport = dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]] - if srcport in dualtor_ports: + if srcport in dualtor_ports_for_duts: queues = "0-1" else: queues = "0-2" @@ -1442,9 +1633,8 @@ def egressLossyProfile( @pytest.fixture(scope='class') def losslessSchedProfile( - self, duthosts, enum_frontend_asic_index, enum_rand_one_per_hwsku_frontend_hostname, - dutConfig, tbinfo, lower_tor_host - ): + self, duthosts, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host + ): """ Retreives lossless scheduler profile @@ -1456,22 +1646,18 @@ def losslessSchedProfile( Returns: losslessSchedProfile (dict): Map of scheduler parameters """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + dut_asic = get_src_dst_asic_and_duts['src_asic'] yield self.__getSchedulerParam( - duthost.asic_instance(enum_frontend_asic_index), + dut_asic, dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], self.TARGET_LOSSLESS_QUEUE_SCHED ) @pytest.fixture(scope='class') def lossySchedProfile( - self, duthosts, enum_frontend_asic_index, enum_rand_one_per_hwsku_frontend_hostname, - dutConfig, tbinfo, lower_tor_host - ): + self, duthosts, get_src_dst_asic_and_duts, dutConfig, tbinfo, lower_tor_host + ): """ Retreives lossy scheduler profile @@ -1483,20 +1669,16 @@ def lossySchedProfile( Returns: lossySchedProfile (dict): Map of scheduler parameters """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - + dut_asic = get_src_dst_asic_and_duts['src_asic'] yield self.__getSchedulerParam( - duthost.asic_instance(enum_frontend_asic_index), + dut_asic, dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], self.TARGET_LOSSY_QUEUE_SCHED ) @pytest.fixture def updateSchedProfile( - self, duthosts, 
enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, + self, duthosts, get_src_dst_asic_and_duts, dutQosConfig, losslessSchedProfile, lossySchedProfile, tbinfo, lower_tor_host ): """ @@ -1511,11 +1693,6 @@ def updateSchedProfile( Returns: None """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host - else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - def updateRedisSchedParam(schedParam): """ Helper function to updates lossless/lossy scheduler profiles @@ -1526,17 +1703,18 @@ def updateRedisSchedParam(schedParam): Returns: None """ - duthost.asic_instance(enum_frontend_asic_index).run_redis_cmd( - argv = [ - "redis-cli", - "-n", - "4", - "HSET", - schedParam["profile"], - "weight", - schedParam["qosConfig"] - ] - ) + for a_asic in get_src_dst_asic_and_duts['all_asics']: + a_asic.run_redis_cmd( + argv = [ + "redis-cli", + "-n", + "4", + "HSET", + schedParam["profile"], + "weight", + schedParam["qosConfig"] + ] + ) wrrSchedParams = [ { @@ -1570,7 +1748,7 @@ def updateRedisSchedParam(schedParam): @pytest.fixture def resetWatermark( - self, duthosts, enum_frontend_asic_index, enum_rand_one_per_hwsku_frontend_hostname, tbinfo, lower_tor_host + self, duthosts, get_src_dst_asic_and_duts, tbinfo, lower_tor_host ): """ Reset queue watermark @@ -1581,14 +1759,65 @@ def resetWatermark( Returns: None """ - if 'dualtor' in tbinfo['topo']['name']: - duthost = lower_tor_host + + for dut_asic in get_src_dst_asic_and_duts['all_asics']: + dut_asic.command("counterpoll watermark enable") + dut_asic.command("counterpoll queue enable") + dut_asic.command("sleep 70") + dut_asic.command("counterpoll watermark disable") + dut_asic.command("counterpoll queue disable") + + @pytest.fixture(scope='class') + def dualtor_ports_for_duts(request, get_src_dst_asic_and_duts): + # Fetch dual ToR ports + logger.info("Starting fetching dual ToR info") + + fetch_dual_tor_ports_script = "\ + local remap_enabled = redis.call('HGET', 
'SYSTEM_DEFAULTS|tunnel_qos_remap', 'status')\ + if remap_enabled ~= 'enabled' then\ + return {}\ + end\ + local type = redis.call('HGET', 'DEVICE_METADATA|localhost', 'type')\ + local expected_neighbor_type\ + local expected_neighbor_suffix\ + if type == 'LeafRouter' then\ + expected_neighbor_type = 'ToRRouter'\ + expected_neighbor_suffix = 'T0'\ + else\ + if type == 'ToRRouter' then\ + local subtype = redis.call('HGET', 'DEVICE_METADATA|localhost', 'subtype')\ + if subtype == 'DualToR' then\ + expected_neighbor_type = 'LeafRouter'\ + expected_neighbor_suffix = 'T1'\ + end\ + end\ + end\ + if expected_neighbor_type == nil then\ + return {}\ + end\ + local result = {}\ + local all_ports_with_neighbor = redis.call('KEYS', 'DEVICE_NEIGHBOR|*')\ + for i = 1, #all_ports_with_neighbor, 1 do\ + local neighbor = redis.call('HGET', all_ports_with_neighbor[i], 'name')\ + if neighbor ~= nil and string.sub(neighbor, -2, -1) == expected_neighbor_suffix then\ + local peer_type = redis.call('HGET', 'DEVICE_NEIGHBOR_METADATA|' .. 
neighbor, 'type')\ + if peer_type == expected_neighbor_type then\ + table.insert(result, string.sub(all_ports_with_neighbor[i], 17, -1))\ + end\ + end\ + end\ + return result\ + " + + duthost = get_src_dst_asic_and_duts['src_dut'] + + dualtor_ports_str = get_src_dst_asic_and_duts['src_asic'].run_redis_cmd( + argv=["sonic-db-cli", "CONFIG_DB", "eval", fetch_dual_tor_ports_script, "0"]) + if dualtor_ports_str: + dualtor_ports_set = set(dualtor_ports_str) else: - duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] - - dut_asic = duthost.asic_instance(enum_frontend_asic_index) - dut_asic.command("counterpoll watermark enable") - dut_asic.command("counterpoll queue enable") - dut_asic.command("sleep 70") - dut_asic.command("counterpoll watermark disable") - dut_asic.command("counterpoll queue disable") + dualtor_ports_set = set({}) + + logger.info("Finish fetching dual ToR info {}".format(dualtor_ports_set)) + + return dualtor_ports_set diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py index 41b2e50c4a6..b6adf889cc6 100644 --- a/tests/qos/test_qos_sai.py +++ b/tests/qos/test_qos_sai.py @@ -48,13 +48,14 @@ PTF_PORT_MAPPING_MODE = 'use_orig_interface' @pytest.fixture(autouse=True) -def ignore_expected_loganalyzer_exception(enum_rand_one_per_hwsku_frontend_hostname, loganalyzer): +def ignore_expected_loganalyzer_exception(get_src_dst_asic_and_duts, loganalyzer): """ignore the syslog ERR syncd0#syncd: [03:00.0] brcm_sai_set_switch_attribute:1920 updating switch mac addr failed with error -2""" ignore_regex = [ ".*ERR syncd[0-9]*#syncd.*brcm_sai_set_switch_attribute.*updating switch mac addr failed with error.*" ] if loganalyzer: - loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignore_regex) + for a_dut in get_src_dst_asic_and_duts['all_duts']: + loganalyzer[a_dut.hostname].ignore_regex.extend(ignore_regex) class TestQosSai(QosSaiBase): """TestQosSai derives from QosSaiBase and contains collection of QoS SAI test cases. 
@@ -110,21 +111,33 @@ def replaceNonExistentPortId(self, availablePortIds, portIds): return True - def updateTestPortIdIp(self, dutConfig, qosParams=None): + def updateTestPortIdIp(self, dutConfig, get_src_dst_asic_and_duts, qosParams=None): + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + src_testPortIds = dutConfig["testPortIds"][src_dut_index][src_asic_index] + dst_testPortIds = dutConfig["testPortIds"][dst_dut_index][dst_asic_index] + testPortIds = src_testPortIds + list(set(dst_testPortIds) - set(src_testPortIds)) portIdNames = [] portIds = [] + for idName in dutConfig["testPorts"]: if re.match('(?:src|dst)_port\S+id', idName): portIdNames.append(idName) ipName = idName.replace('id', 'ip') pytest_assert(ipName in dutConfig["testPorts"], 'Not find {} for {} in dutConfig'.format(ipName, idName)) portIds.append(dutConfig["testPorts"][idName]) - pytest_assert(self.replaceNonExistentPortId(dutConfig["testPortIds"], portIds), "No enough test ports") + pytest_assert(self.replaceNonExistentPortId(testPortIds, set(portIds)), "No enough test ports") for idx, idName in enumerate(portIdNames): dutConfig["testPorts"][idName] = portIds[idx] ipName = idName.replace('id', 'ip') - dutConfig["testPorts"][ipName] = dutConfig["testPortIps"][portIds[idx]]['peer_addr'] + if 'src' in ipName: + testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index] + else: + testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index] + dutConfig["testPorts"][ipName] = testPortIps[portIds[idx]]['peer_addr'] if qosParams != None: portIdNames = [] @@ -142,7 +155,7 @@ def updateTestPortIdIp(self, dutConfig, qosParams=None): portIds.append(ids) # record None to indicate it's just one port portNumbers.append(None) - pytest_assert(self.replaceNonExistentPortId(dutConfig["testPortIds"], 
portIds), "No enough test ports") + pytest_assert(self.replaceNonExistentPortId(testPortIds, portIds), "No enough test ports") startPos = 0 for idx, idName in enumerate(portIdNames): if portNumbers[idx] != None: # port list @@ -153,17 +166,18 @@ def updateTestPortIdIp(self, dutConfig, qosParams=None): startPos += 1 def testParameter( - self, duthost, dutConfig, dutQosConfig, ingressLosslessProfile, - ingressLossyProfile, egressLosslessProfile, dualtor_ports + self, duthosts, get_src_dst_asic_and_duts, dutConfig, dutQosConfig, ingressLosslessProfile, + ingressLossyProfile, egressLosslessProfile, dualtor_ports_for_duts ): - logger.info("asictype {}".format(duthost.facts["asic_type"])) + logger.info("asictype {}".format(get_src_dst_asic_and_duts['src_dut'].facts["asic_type"])) logger.info("config {}".format(dutConfig)) logger.info("qosConfig {}".format(dutQosConfig)) - logger.info("dualtor_ports {}".format(dualtor_ports)) + logger.info("dualtor_ports {}".format(dualtor_ports_for_duts)) @pytest.mark.parametrize("xoffProfile", ["xoff_1", "xoff_2", "xoff_3", "xoff_4"]) def testQosSaiPfcXoffLimit( - self, xoffProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, xoffProfile, duthosts, get_src_dst_asic_and_duts, + ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, egressLosslessProfile ): """ @@ -195,7 +209,7 @@ def testQosSaiPfcXoffLimit( else: qosConfig = dutQosConfig["param"][portSpeedCableLength] - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -310,7 +324,7 @@ def testPfcStormWithSharedHeadroomOccupancy( testParams["cell_size"] = qosConfig[xonProfile]["cell_size"] # Params required for generating a PFC Storm - duthost = dutConfig["dutInstance"] + duthost = dutConfig["srcDutInstance"] pfcwd_timers = set_pfc_timers() pfcwd_test_port_id = dutConfig["testPorts"]["src_port_id"] pfcwd_test_port = 
dutConfig["dutInterfaces"][pfcwd_test_port_id] @@ -410,7 +424,7 @@ def testPfcStormWithSharedHeadroomOccupancy( @pytest.mark.parametrize("xonProfile", ["xon_1", "xon_2", "xon_3", "xon_4"]) def testQosSaiPfcXonLimit( - self, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, get_src_dst_asic_and_duts, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile ): """ @@ -444,7 +458,7 @@ def testQosSaiPfcXonLimit( else: qosConfig = dutQosConfig["param"] - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) dst_port_count = set([ dutConfig["testPorts"]["dst_port_id"], @@ -504,7 +518,8 @@ def testQosSaiPfcXonLimit( @pytest.mark.parametrize("LosslessVoqProfile", ["lossless_voq_1", "lossless_voq_2", "lossless_voq_3", "lossless_voq_4"]) def testQosSaiLosslessVoq( - self, LosslessVoqProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, singleMemberPortStaticRoute, nearbySourcePorts + self, LosslessVoqProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + singleMemberPortStaticRoute, nearbySourcePorts, get_src_dst_asic_and_duts ): """ Test QoS SAI XOFF limits for various voq mode configurations @@ -527,12 +542,15 @@ def testQosSaiLosslessVoq( qosConfig = dutQosConfig["param"][portSpeedCableLength]["breakout"] else: qosConfig = dutQosConfig["param"][portSpeedCableLength] - self.updateTestPortIdIp(dutConfig, qosConfig[LosslessVoqProfile]) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts, qosConfig[LosslessVoqProfile]) dst_port_id, dst_port_ip = singleMemberPortStaticRoute src_port_1_id, src_port_2_id = nearbySourcePorts - testPortIps = dutConfig["testPortIps"] + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index] + testParams = dict() testParams.update(dutTestParams["basicParams"]) testParams.update({ @@ -566,7 +584,7 @@ def 
testQosSaiLosslessVoq( ) def testQosSaiHeadroomPoolSize( - self, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile ): """ @@ -598,7 +616,26 @@ def testQosSaiHeadroomPoolSize( qosConfig['hdrm_pool_size']['pgs'] = qosConfig['hdrm_pool_size']['pgs'][:2] qosConfig['hdrm_pool_size']['dscps'] = qosConfig['hdrm_pool_size']['dscps'][:2] - self.updateTestPortIdIp(dutConfig, qosConfig["hdrm_pool_size"]) + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + + if ('platform_asic' in dutTestParams["basicParams"] and dutTestParams["basicParams"]["platform_asic"] == "broadcom-dnx"): + # Need to adjust hdrm_pool_size src_port_ids, dst_port_id and pgs_num based on how many source and dst ports + # present + src_ports = dutConfig['testPortIds'][src_dut_index][src_asic_index] + if get_src_dst_asic_and_duts['src_asic'] == get_src_dst_asic_and_duts['dst_asic']: + # Src and dst are the same asics, leave one for dst port and the rest for src ports + qosConfig["hdrm_pool_size"]["src_port_ids"] = src_ports[:-1] + qosConfig["hdrm_pool_size"]["dst_port_id"] = src_ports[-1] + qosConfig["hdrm_pool_size"]["pgs_num"] = 2 * len(qosConfig["hdrm_pool_size"]["src_port_ids"]) + else: + qosConfig["hdrm_pool_size"]["src_port_ids"] = src_ports + qosConfig["hdrm_pool_size"]["dst_port_id"] = dutConfig['testPortIds'][dst_dut_index][dst_asic_index][-1] + qosConfig["hdrm_pool_size"]["pgs_num"] = 2 * len(qosConfig["hdrm_pool_size"]["src_port_ids"]) + + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts, qosConfig["hdrm_pool_size"]) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -608,9 +645,10 @@ def testQosSaiHeadroomPoolSize( "ecn": qosConfig["hdrm_pool_size"]["ecn"], "pgs": 
qosConfig["hdrm_pool_size"]["pgs"], "src_port_ids": qosConfig["hdrm_pool_size"]["src_port_ids"], - "src_port_ips": [testPortIps[port]['peer_addr'] for port in qosConfig["hdrm_pool_size"]["src_port_ids"]], + "src_port_ips": [testPortIps[src_dut_index][src_asic_index][port]['peer_addr'] + for port in qosConfig["hdrm_pool_size"]["src_port_ids"]], "dst_port_id": qosConfig["hdrm_pool_size"]["dst_port_id"], - "dst_port_ip": testPortIps[qosConfig["hdrm_pool_size"]["dst_port_id"]]['peer_addr'], + "dst_port_ip": testPortIps[dst_dut_index][dst_asic_index][qosConfig["hdrm_pool_size"]["dst_port_id"]]['peer_addr'], "pgs_num": qosConfig["hdrm_pool_size"]["pgs_num"], "pkts_num_trig_pfc": qosConfig["hdrm_pool_size"]["pkts_num_trig_pfc"], "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], @@ -651,7 +689,7 @@ def testQosSaiHeadroomPoolSize( @pytest.mark.parametrize("sharedResSizeKey", ["shared_res_size_1", "shared_res_size_2"]) def testQosSaiSharedReservationSize( - self, sharedResSizeKey, ptfhost, dutTestParams, dutConfig, dutQosConfig + self, sharedResSizeKey, ptfhost, dutTestParams, dutConfig, dutQosConfig, get_src_dst_asic_and_duts ): """ Test QoS SAI shared reservation size @@ -669,7 +707,12 @@ def testQosSaiSharedReservationSize( """ qosConfig = dutQosConfig["param"] - testPortIps = dutConfig["testPortIps"] + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + src_testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index] + dst_testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index] if not sharedResSizeKey in qosConfig.keys(): pytest.skip("Shared reservation size parametrization '%s' is not enabled" % sharedResSizeKey) @@ -678,12 +721,13 @@ def testQosSaiSharedReservationSize( # Skip if buffer pools and profiles are not be present, marked by qos param 
generator pytest.skip(qosConfig[sharedResSizeKey]["skip"]) - self.updateTestPortIdIp(dutConfig, qosConfig[sharedResSizeKey]) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts, qosConfig[sharedResSizeKey]) - port_idx_to_id = testPortIps.keys() + src_port_idx_to_id = list(src_testPortIps.keys()) + dst_port_idx_to_id = list(dst_testPortIps.keys()) # Translate requested port indices to available port IDs - src_port_ids = [port_idx_to_id[idx] for idx in qosConfig[sharedResSizeKey]["src_port_i"]] - dst_port_ids = [port_idx_to_id[idx] for idx in qosConfig[sharedResSizeKey]["dst_port_i"]] + src_port_ids = [src_port_idx_to_id[idx] for idx in qosConfig[sharedResSizeKey]["src_port_i"]] + dst_port_ids = [dst_port_idx_to_id[idx] for idx in qosConfig[sharedResSizeKey]["dst_port_i"]] testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -694,9 +738,9 @@ def testQosSaiSharedReservationSize( "pgs": qosConfig[sharedResSizeKey]["pgs"], "queues": qosConfig[sharedResSizeKey]["queues"], "src_port_ids": src_port_ids, - "src_port_ips": [testPortIps[port]['peer_addr'] for port in src_port_ids], + "src_port_ips": [src_testPortIps[port]['peer_addr'] for port in src_port_ids], "dst_port_ids": dst_port_ids, - "dst_port_ips": [testPortIps[port]['peer_addr'] for port in dst_port_ids], + "dst_port_ips": [dst_testPortIps[port]['peer_addr'] for port in dst_port_ids], "pkt_counts": qosConfig[sharedResSizeKey]["pkt_counts"], "shared_limit_bytes": qosConfig[sharedResSizeKey]["shared_limit_bytes"], "hwsku":dutTestParams['hwsku'] @@ -722,7 +766,7 @@ def testQosSaiSharedReservationSize( ) def testQosSaiHeadroomPoolWatermark( - self, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, dutTestParams, + self, duthosts, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, sharedHeadroomPoolSize, resetWatermark ): @@ -747,7 +791,7 @@ def testQosSaiHeadroomPoolWatermark( Raises: RunAnsibleModuleFail if ptf test fails """ - 
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname] + duthost = get_src_dst_asic_and_duts['src_dut'] cmd_output = duthost.shell("show headroom-pool watermark", module_ignore_errors=True) if cmd_output['rc'] != 0: pytest.skip("Headroom pool watermark is not supported") @@ -758,6 +802,21 @@ def testQosSaiHeadroomPoolWatermark( if not 'hdrm_pool_size' in qosConfig.keys(): pytest.skip("Headroom pool size is not enabled on this DUT") + src_dut_index = get_src_dst_asic_and_duts['src_dut_index'] + dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index'] + src_asic_index = get_src_dst_asic_and_duts['src_asic_index'] + dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index'] + src_ports = dutConfig['testPortIds'][src_dut_index][src_asic_index] + if get_src_dst_asic_and_duts['src_asic'] == get_src_dst_asic_and_duts['dst_asic']: + # Src and dst are the same asics, leave one for dst port and the rest for src ports + qosConfig["hdrm_pool_size"]["src_port_ids"] = src_ports[:-1] + qosConfig["hdrm_pool_size"]["dst_port_id"] = src_ports[-1] + qosConfig["hdrm_pool_size"]["pgs_num"] = 2 * len(qosConfig["hdrm_pool_size"]["src_port_ids"]) + else: + qosConfig["hdrm_pool_size"]["src_port_ids"] = src_ports + qosConfig["hdrm_pool_size"]["dst_port_id"] = dutConfig['testPortIds'][dst_dut_index][dst_asic_index][-1] + qosConfig["hdrm_pool_size"]["pgs_num"] = 2 * len(qosConfig["hdrm_pool_size"]["src_port_ids"]) + testParams = dict() testParams.update(dutTestParams["basicParams"]) testParams.update({ @@ -766,9 +825,9 @@ def testQosSaiHeadroomPoolWatermark( "ecn": qosConfig["hdrm_pool_size"]["ecn"], "pgs": qosConfig["hdrm_pool_size"]["pgs"], "src_port_ids": qosConfig["hdrm_pool_size"]["src_port_ids"], - "src_port_ips": [testPortIps[port]['peer_addr'] for port in qosConfig["hdrm_pool_size"]["src_port_ids"]], + "src_port_ips": [testPortIps[src_dut_index][src_asic_index][port]['peer_addr'] for port in qosConfig["hdrm_pool_size"]["src_port_ids"]], "dst_port_id": 
qosConfig["hdrm_pool_size"]["dst_port_id"], - "dst_port_ip": testPortIps[qosConfig["hdrm_pool_size"]["dst_port_id"]]['peer_addr'], + "dst_port_ip": testPortIps[dst_dut_index][dst_asic_index][qosConfig["hdrm_pool_size"]["dst_port_id"]]['peer_addr'], "pgs_num": qosConfig["hdrm_pool_size"]["pgs_num"], "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], "pkts_num_trig_pfc": qosConfig["hdrm_pool_size"]["pkts_num_trig_pfc"], @@ -804,7 +863,7 @@ def testQosSaiHeadroomPoolWatermark( @pytest.mark.parametrize("bufPool", ["wm_buf_pool_lossless", "wm_buf_pool_lossy"]) def testQosSaiBufferPoolWatermark( - self, request, bufPool, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, request, get_src_dst_asic_and_duts, bufPool, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, egressLossyProfile, resetWatermark, ): """ @@ -880,7 +939,7 @@ def testQosSaiBufferPoolWatermark( ) def testQosSaiLossyQueue( - self, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, ingressLossyProfile ): """ @@ -906,7 +965,7 @@ def testQosSaiLossyQueue( else: qosConfig = dutQosConfig["param"] - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -951,7 +1010,7 @@ def testQosSaiLossyQueue( @pytest.mark.parametrize("LossyVoq", ["lossy_queue_voq_1", "lossy_queue_voq_2"]) def testQosSaiLossyQueueVoq( self, LossyVoq, ptfhost, dutTestParams, dutConfig, dutQosConfig, - ingressLossyProfile, duthost, localhost, singleMemberPortStaticRoute + ingressLossyProfile, duthost, localhost, singleMemberPortStaticRoute, get_src_dst_asic_and_duts ): """ Test QoS SAI Lossy queue with non_default voq and default voq @@ -980,7 +1039,7 @@ def testQosSaiLossyQueueVoq( original_voq_markings = get_markings_dut(duthost) setup_markings_dut(duthost, localhost, voq_allocation_mode="default") - 
self.updateTestPortIdIp(dutConfig, qosConfig[LossyVoq]) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts, qosConfig[LossyVoq]) try: dst_port_id, dst_port_ip = singleMemberPortStaticRoute @@ -1021,7 +1080,7 @@ def testQosSaiLossyQueueVoq( setup_markings_dut(duthost, localhost, **original_voq_markings) def testQosSaiDscpQueueMapping( - self, duthost, ptfhost, dutTestParams, dutConfig, dut_qos_maps + self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dut_qos_maps ): """ Test QoS SAI DSCP to queue mapping @@ -1040,10 +1099,11 @@ def testQosSaiDscpQueueMapping( RunAnsibleModuleFail if ptf test fails """ # Skip the regular dscp to pg mapping test. Will run another test case instead. + duthost = get_src_dst_asic_and_duts['src_dut'] if separated_dscp_to_tc_map_on_uplink(duthost, dut_qos_maps): pytest.skip("Skip this test since separated DSCP_TO_TC_MAP is applied") - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -1204,7 +1264,7 @@ def testQosSaiDot1pPgMapping( ) def testQosSaiDwrr( - self, ptfhost, duthost, dutTestParams, dutConfig, dutQosConfig, + self, ptfhost, duthosts, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, ): """ Test QoS SAI DWRR @@ -1229,9 +1289,11 @@ def testQosSaiDwrr( qosConfigWrr = qosConfig[portSpeedCableLength]["wrr"] else: qosConfigWrr = qosConfig["wrr"] + + duthost = get_src_dst_asic_and_duts['src_dut'] qos_remap_enable = is_tunnel_qos_remap_enabled(duthost) - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -1272,7 +1334,7 @@ def testQosSaiDwrr( @pytest.mark.parametrize("pgProfile", ["wm_pg_shared_lossless", "wm_pg_shared_lossy"]) def testQosSaiPgSharedWatermark( - self, pgProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, pgProfile, ptfhost, 
get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, resetWatermark ): """ @@ -1308,7 +1370,7 @@ def testQosSaiPgSharedWatermark( elif "wm_pg_shared_lossy" in pgProfile: pktsNumFillShared = int(qosConfig[pgProfile]["pkts_num_trig_egr_drp"]) - 1 - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -1352,7 +1414,7 @@ def testQosSaiPgSharedWatermark( ) def testQosSaiPgHeadroomWatermark( - self, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark, + self, ptfhost, get_src_dst_asic_and_duts, dutTestParams, dutConfig, dutQosConfig, resetWatermark, ): """ Test QoS SAI PG headroom watermark test @@ -1377,7 +1439,7 @@ def testQosSaiPgHeadroomWatermark( else: qosConfig = dutQosConfig["param"][portSpeedCableLength] - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -1470,7 +1532,7 @@ def testQosSaiPGDrop( @pytest.mark.parametrize("queueProfile", ["wm_q_shared_lossless", "wm_q_shared_lossy"]) def testQosSaiQSharedWatermark( - self, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + self, get_src_dst_asic_and_duts, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark ): """ @@ -1506,7 +1568,7 @@ def testQosSaiQSharedWatermark( qosConfig = dutQosConfig["param"] triggerDrop = qosConfig[queueProfile]["pkts_num_trig_egr_drp"] - self.updateTestPortIdIp(dutConfig) + self.updateTestPortIdIp(dutConfig, get_src_dst_asic_and_duts) testParams = dict() testParams.update(dutTestParams["basicParams"]) @@ -1546,7 +1608,7 @@ def testQosSaiQSharedWatermark( ) def testQosSaiDscpToPgMapping( - self, duthost, request, ptfhost, dutTestParams, dutConfig, dut_qos_maps + self, get_src_dst_asic_and_duts, duthost, request, ptfhost, dutTestParams, dutConfig, dut_qos_maps ): """ Test QoS SAI DSCP to PG 
mapping ptf test @@ -1581,7 +1643,7 @@ def testQosSaiDscpToPgMapping( "dst_port_id": dutConfig["testPorts"]["dst_port_id"], "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], "src_port_id": dutConfig["testPorts"]["src_port_id"], - "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"] }) if "platform_asic" in dutTestParams["basicParams"]: @@ -1653,7 +1715,7 @@ def testQosSaiSeparatedDscpToPgMapping(self, duthost, request, ptfhost, dutTestP def testQosSaiDwrrWeightChange( - self, ptfhost, duthost, dutTestParams, dutConfig, dutQosConfig, + self, get_src_dst_asic_and_duts, ptfhost, dutTestParams, dutConfig, dutQosConfig, updateSchedProfile ): """ @@ -1681,6 +1743,7 @@ def testQosSaiDwrrWeightChange( else: qosConfigWrrChg = qosConfig["wrr_chg"] + duthost = get_src_dst_asic_and_duts['src_dut'] qos_remap_enable = is_tunnel_qos_remap_enabled(duthost) testParams = dict() testParams.update(dutTestParams["basicParams"]) diff --git a/tests/saitests/py3/sai_base_test.py b/tests/saitests/py3/sai_base_test.py index f97541d758f..721c046ffb5 100644 --- a/tests/saitests/py3/sai_base_test.py +++ b/tests/saitests/py3/sai_base_test.py @@ -12,6 +12,8 @@ from ptf.base_tests import BaseTest from ptf import config import ptf.testutils as testutils +import json +import socket ################################################################ # @@ -23,11 +25,11 @@ from thrift.transport import TSocket from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol -import socket import sys import paramiko from paramiko.ssh_exception import BadHostKeyException, AuthenticationException, SSHException +# dictionary of interface_to_front_mapping with key 'src' or 'dst' and the ports for those target interface_to_front_mapping = {} from switch import (sai_thrift_port_tx_enable, @@ -41,44 +43,78 @@ def setUp(self): BaseTest.setUp(self) self.test_params = testutils.test_params_get() - if "server" in self.test_params: - 
self.server = self.test_params['server'] + # server is a list [ , , ... } + if "src_server" in self.test_params: + # server has format : + src_server = self.test_params['src_server'].strip().split(":") + self.src_server_ip = src_server[0] + src_server_port = src_server[1] else: - self.server = 'localhost' - self.platform_asic = self.test_params.get('platform_asic', None) + self.src_server_ip = 'localhost' + src_server_port = 9092 + + if "dst_server" in self.test_params: + # server has format : + dst_server = self.test_params['dst_server'].strip().split(":") + self.dst_server_ip = dst_server[0] + dst_server_port = dst_server[1] + else: + self.dst_server_ip = self.src_server_ip + dst_server_port = src_server_port - self.asic_id = self.test_params.get('asic_id', None) - if "port_map" in self.test_params: - user_input = self.test_params['port_map'] - splitted_map = user_input.split(",") - for item in splitted_map: - interface_front_pair = item.split("@") - interface_to_front_mapping[interface_front_pair[0]] = interface_front_pair[1] - elif "port_map_file" in self.test_params: + if "port_map_file" in self.test_params: user_input = self.test_params['port_map_file'] - f = open(user_input, 'r') - for line in f: - if (len(line) > 0 and (line[0] == '#' or line[0] == ';' or line[0]=='/')): - continue - interface_front_pair = line.split("@") - interface_to_front_mapping[interface_front_pair[0]] = interface_front_pair[1].strip() - f.close() + interface_to_front_mapping['src'] = {} + with open(user_input) as f: + ptf_test_port_map = json.load(f) + src_dut_index = self.test_params['src_dut_index'] + self.src_asic_index = self.test_params.get('src_asic_index', None) + dst_dut_index = self.test_params['dst_dut_index'] + self.dst_asic_index = self.test_params.get('dst_asic_index', None) + for a_ptf_port, a_ptf_port_info in ptf_test_port_map.items(): + if src_dut_index in a_ptf_port_info['target_dut'] and \ + a_ptf_port_info['asic_idx'] == self.src_asic_index: + 
interface_to_front_mapping['src'][a_ptf_port] = a_ptf_port_info['dut_port'] + if src_dut_index != dst_dut_index or self.src_asic_index != self.dst_asic_index: + interface_to_front_mapping['dst'] = {} + for a_ptf_port, a_ptf_port_info in ptf_test_port_map.items(): + if dst_dut_index in a_ptf_port_info['target_dut'] and \ + a_ptf_port_info['asic_idx'] == self.dst_asic_index: + interface_to_front_mapping['dst'][a_ptf_port] = a_ptf_port_info['dut_port'] else: exit("No ptf interface<-> switch front port mapping, please specify as parameter or in external file") - + # dictionary with key 'src' or 'dst' + self.clients = {} # Set up thrift client and contact server - self.transport = TSocket.TSocket(self.server, 9092) - self.transport = TTransport.TBufferedTransport(self.transport) - self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport) - self.client = switch_sai_rpc.Client(self.protocol) - self.transport.open() + # Below are dictionaries with key dut_index, and value the value for dut at dut_index. + self.src_transport = TSocket.TSocket(self.src_server_ip, src_server_port) + self.src_transport = TTransport.TBufferedTransport(self.src_transport) + self.src_protocol = TBinaryProtocol.TBinaryProtocol(self.src_transport) + self.src_client = switch_sai_rpc.Client(self.src_protocol) + self.src_transport.open() + self.clients['src'] = self.src_client + if self.src_server_ip == self.dst_server_ip and src_server_port == dst_server_port: + # using the same client for dst as the src. 
+ self.dst_client = self.src_client + else: + # using different client for dst + self.dst_transport = TSocket.TSocket(self.dst_server_ip, dst_server_port) + self.dst_transport = TTransport.TBufferedTransport(self.dst_transport) + self.dst_protocol = TBinaryProtocol.TBinaryProtocol(self.dst_transport) + self.dst_client = switch_sai_rpc.Client(self.dst_protocol) + self.dst_transport.open() + self.clients['dst'] = self.dst_client + + self.platform_asic = self.test_params.get('platform_asic', None) def tearDown(self): if config["log_dir"] != None: self.dataplane.stop_pcap() BaseTest.tearDown(self) - self.transport.close() + self.src_transport.close() + if self.dst_client != self.src_client: + self.dst_transport.close() def exec_cmd_on_dut(self, hostname, username, password, cmd): client = paramiko.SSHClient() @@ -113,29 +149,30 @@ def exec_cmd_on_dut(self, hostname, username, password, cmd): return stdOut, stdErr, retValue - def sai_thrift_port_tx_enable(self, client, asic_type, port_list, last_port=True): + def sai_thrift_port_tx_enable(self, client, asic_type, port_list, target='dst', last_port=True): if self.platform_asic and self.platform_asic == "broadcom-dnx" and last_port: # need to enable watchdog on the source asic using cint script - cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog enable\"".format(self.asic_id) - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.server, + cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog enable\"".format(self.src_asic_index) + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.src_server_ip, self.test_params['dut_username'], self.test_params['dut_password'], cmd) - assert ('Success rv = 0' in stdOut[1], "enable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.asic_id, - self.server)) + assert ('Success rv = 0' in stdOut[1]), "enable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.src_asic_index, + self.src_server_ip) + + sai_thrift_port_tx_enable(client, asic_type, port_list, target=target) - 
sai_thrift_port_tx_enable(client, asic_type, port_list) - def sai_thrift_port_tx_disable(self, client, asic_type, port_list): + def sai_thrift_port_tx_disable(self, client, asic_type, port_list, target='dst'): if self.platform_asic and self.platform_asic == "broadcom-dnx": # need to enable watchdog on the source asic using cint script - cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog disable\"".format(self.asic_id) - stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.server, + cmd = "bcmcmd -n {} \"BCMSAI credit-watchdog disable\"".format(self.src_asic_index) + stdOut, stdErr, retValue = self.exec_cmd_on_dut(self.src_server_ip, self.test_params['dut_username'], self.test_params['dut_password'], cmd) - assert ('Success rv = 0' in stdOut[1]), "disable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.asic_id, - self.server) - sai_thrift_port_tx_disable(client, asic_type, port_list) + assert ('Success rv = 0' in stdOut[1]), "disable wd failed '{}' on asic '{}' on '{}'".format(cmd, self.src_asic_index, + self.src_server_ip) + sai_thrift_port_tx_disable(client, asic_type, port_list, target=target) class ThriftInterfaceDataPlane(ThriftInterface): diff --git a/tests/saitests/py3/sai_qos_tests.py b/tests/saitests/py3/sai_qos_tests.py index 47f06d2dd7a..ac4baf021b7 100644 --- a/tests/saitests/py3/sai_qos_tests.py +++ b/tests/saitests/py3/sai_qos_tests.py @@ -214,12 +214,12 @@ def fill_leakout_plus_one(test_case, src_port_id, dst_port_id, pkt, queue, asic_ # Returns whether 1 packet was successfully enqueued. 
if asic_type in ['cisco-8000']: queue_counters_base = sai_thrift_read_queue_occupancy( - test_case.client, dst_port_id) + test_case.dst_client, "dst", dst_port_id) max_packets = 500 for packet_i in range(max_packets): send_packet(test_case, src_port_id, pkt, 1) queue_counters = sai_thrift_read_queue_occupancy( - test_case.client, dst_port_id) + test_case.clients['dst'], "dst", dst_port_id) if queue_counters[queue] > queue_counters_base[queue]: print("fill_leakout_plus_one: Success, sent %d packets, queue occupancy bytes rose from %d to %d" % ( packet_i + 1, queue_counters_base[queue], queue_counters[queue]), file=sys.stderr) @@ -231,7 +231,7 @@ class ARPpopulate(sai_base_test.ThriftInterfaceDataPlane): def setUp(self): sai_base_test.ThriftInterfaceDataPlane.setUp(self) time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters self.router_mac = self.test_params['router_mac'] @@ -292,20 +292,20 @@ def runTest(self): class ReleaseAllPorts(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) asic_type = self.test_params['sonic_asic_type'] - self.sai_thrift_port_tx_enable( - self.client, asic_type, list(port_list.keys())) + for target, a_client in self.clients.items(): + self.sai_thrift_port_tx_enable(a_client, asic_type, list(port_list[target].keys()), target=target) # DSCP to queue mapping class DscpMappingPB(sai_base_test.ThriftInterfaceDataPlane): - def get_port_id(self, port_name): - sai_port_id = self.client.sai_thrift_get_port_id_by_front_port( + def get_port_id(self, client, port_name): + sai_port_id = client.sai_thrift_get_port_id_by_front_port( port_name ) print("Port name {}, SAI port id {}".format( @@ -314,7 +314,7 @@ def get_port_id(self, port_name): return sai_port_id def runTest(self): - switch_init(self.client) + switch_init(self.clients) router_mac = self.test_params['router_mac'] dst_port_id = int(self.test_params['dst_port_id']) @@ -348,13 +348,13 @@ def 
runTest(self): test_dst_port_name = self.test_params.get("test_dst_port_name") sai_dst_port_id = None if test_dst_port_name is not None: - sai_dst_port_id = self.get_port_id(test_dst_port_name) + sai_dst_port_id = self.get_port_id(self.dst_client, test_dst_port_name) else: - sai_dst_port_id = port_list[dst_port_id] + sai_dst_port_id = port_list['dst'][dst_port_id] time.sleep(10) # port_results is not of our interest here - port_results, queue_results_base = sai_thrift_read_port_counters(self.client, asic_type, sai_dst_port_id) + port_results, queue_results_base = sai_thrift_read_port_counters(self.dst_client, asic_type, sai_dst_port_id) # DSCP Mapping test try: @@ -407,7 +407,7 @@ def runTest(self): # Read Counters time.sleep(10) - port_results, queue_results = sai_thrift_read_port_counters(self.client, asic_type, sai_dst_port_id) + port_results, queue_results = sai_thrift_read_port_counters(self.dst_client, asic_type, sai_dst_port_id) print(list(map(operator.sub, queue_results, queue_results_base)), file=sys.stderr) @@ -466,7 +466,7 @@ def runTest(self): class Dot1pToQueueMapping(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters router_mac = self.test_params['router_mac'] @@ -511,7 +511,8 @@ def runTest(self): try: for queue, dot1ps in list(queue_dot1p_map.items()): - port_results, queue_results_base = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_results, queue_results_base = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) # send pkts with dot1ps that map to the same queue for dot1p in dot1ps: @@ -544,7 +545,8 @@ def runTest(self): # validate queue counters increment by the correct pkt num time.sleep(8) - port_results, queue_results = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_results, queue_results = sai_thrift_read_port_counters( + self.dst_client, 
asic_type, port_list['dst'][dst_port_id]) print(queue_results_base, file=sys.stderr) print(queue_results, file=sys.stderr) print(list(map(operator.sub, queue_results, @@ -590,7 +592,7 @@ def runTest(self): class DscpToPgMapping(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters router_mac = self.test_params['router_mac'] @@ -642,7 +644,7 @@ def runTest(self): try: for pg, dscps in list(pg_dscp_map.items()): pg_cntrs_base = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) # send pkts with dscps that map to the same pg for dscp in dscps: @@ -663,7 +665,7 @@ def runTest(self): # validate pg counters increment by the correct pkt num time.sleep(8) pg_cntrs = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print(pg_cntrs_base, file=sys.stderr) print(pg_cntrs, file=sys.stderr) print(list(map(operator.sub, pg_cntrs, pg_cntrs_base)), @@ -698,7 +700,8 @@ def runTest(self): tos = dscps[dscp_recv_cnt] << 2 tos |= 1 try: - if (recv_pkt.payload.tos == tos) and (recv_pkt.payload.src == src_port_ip) and (recv_pkt.payload.dst == dst_port_ip) and \ + if (recv_pkt.payload.tos == tos) and (recv_pkt.payload.src == src_port_ip) and \ + (recv_pkt.payload.dst == dst_port_ip) and \ (recv_pkt.payload.ttl == exp_ttl) and (recv_pkt.payload.id == exp_ip_id): dscp_recv_cnt += 1 @@ -717,7 +720,8 @@ def runTest(self): # Tunnel DSCP to PG mapping test class TunnelDscpToPgMapping(sai_base_test.ThriftInterfaceDataPlane): - def _build_testing_pkt(self, active_tor_mac, standby_tor_mac, active_tor_ip, standby_tor_ip, inner_dscp, outer_dscp, dst_ip, ecn=1): + def _build_testing_pkt(self, active_tor_mac, standby_tor_mac, active_tor_ip, standby_tor_ip, inner_dscp, + outer_dscp, dst_ip, ecn=1): pkt = simple_tcp_packet( eth_dst=standby_tor_mac, ip_src='1.1.1.1', @@ -743,7 +747,7 @@ 
def runTest(self): This test case is to tx some ip_in_ip packet from Mux tunnel, and check if the traffic is mapped to expected PGs. """ - switch_init(self.client) + switch_init(self.clients) # Parse input parameters active_tor_mac = self.test_params['active_tor_mac'] @@ -772,7 +776,7 @@ def runTest(self): try: # Disable tx on EGRESS port so that headroom buffer cannot be free - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) # There are packet leak even port tx is disabled (18 packets leak on TD3 found) # Hence we send some packet to fill the leak before testing @@ -801,23 +805,25 @@ def runTest(self): outer_dscp=0, dst_ip=dst_port_ip ) - pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[src_port_id]) + pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.src_client, asic_type, + port_list['src'][src_port_id]) send_packet(self, src_port_id, pkt, PKT_NUM) # validate pg counters increment by the correct pkt num time.sleep(8) - pg_shared_wm_res = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[src_port_id]) + pg_shared_wm_res = sai_thrift_read_pg_shared_watermark(self.src_client, asic_type, + port_list['src'][src_port_id]) assert(pg_shared_wm_res[pg] - pg_shared_wm_res_base[pg] <= (PKT_NUM + ERROR_TOLERANCE[pg]) * cell_size) assert(pg_shared_wm_res[pg] - pg_shared_wm_res_base[pg] >= (PKT_NUM - ERROR_TOLERANCE[pg]) * cell_size) finally: # Enable tx on dest port - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) # DOT1P to pg mapping class Dot1pToPgMapping(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters router_mac = self.test_params['router_mac'] @@ -858,7 +864,7 @@ def runTest(self): try: for pg, dot1ps in 
list(pg_dot1p_map.items()): pg_cntrs_base = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) # send pkts with dot1ps that map to the same pg for dot1p in dot1ps: @@ -892,7 +898,7 @@ def runTest(self): # validate pg counters increment by the correct pkt num time.sleep(8) pg_cntrs = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print(pg_cntrs_base, file=sys.stderr) print(pg_cntrs, file=sys.stderr) print(list(map(operator.sub, pg_cntrs, pg_cntrs_base)), @@ -938,7 +944,7 @@ def runTest(self): class PFCtest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -974,7 +980,7 @@ def runTest(self): # dscp 3 -> pg 3 # dscp 4 -> pg 4 pg_dropped_cntrs_old = sai_thrift_read_pg_drop_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) # Prepare IP packet data ttl = 64 @@ -1015,8 +1021,10 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters_base, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) # Add slight tolerance in threshold characterization to consider # the case that cpu puts packets in the egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed @@ -1029,7 +1037,7 @@ def runTest(self): if 'pkts_num_egr_mem' in 
list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and @@ -1059,22 +1067,31 @@ def runTest(self): time.sleep(8) if check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_base, self, src_port_id, pkt, 10) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_id], TRANSMITTED_PKTS, + xmit_counters_base, self, src_port_id, pkt, 10) # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) test_stage = 'after send packets short of triggering PFC' - sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\txmit_counters {}\n\txmit_counters_base {}\n'.format(test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) + sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\t' + \ + 'xmit_counters {}\n\txmit_counters_base {}\n'.format( + test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port no pfc - assert(recv_counters[pg] == recv_counters_base[pg]), 'unexpectedly PFC counter increase, {}'.format(test_stage) + assert(recv_counters[pg] 
== recv_counters_base[pg]), \ + 'unexpectedly PFC counter increase, {}'.format(test_stage) # recv port no ingress drop for cntr in ingress_counters: - assert(recv_counters[cntr] == recv_counters_base[cntr]), 'unexpectedly RX drop counter increase, {}'.format(test_stage) + assert(recv_counters[cntr] == recv_counters_base[cntr]), \ + 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), 'unexpectedly TX drop counter increase, {}'.format(test_stage) + assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send 1 packet to trigger pfc send_packet(self, src_port_id, pkt, 1 + 2 * margin) @@ -1083,18 +1100,23 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here recv_counters_base = recv_counters - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) test_stage = 'after send a few packets to trigger PFC' sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\txmit_counters {}\n\txmit_counters_base {}\n'.format(test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), 'unexpectedly PFC counter not increase, {}'.format(test_stage) + assert(recv_counters[pg] > recv_counters_base[pg]), \ + 'unexpectedly PFC counter not increase, {}'.format(test_stage) # recv port no ingress drop for cntr in ingress_counters: - assert(recv_counters[cntr] 
== recv_counters_base[cntr]), 'unexpectedly RX drop counter increase, {}'.format(test_stage) + assert(recv_counters[cntr] == recv_counters_base[cntr]), \ + 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), 'unexpectedly TX drop counter increase, {}'.format(test_stage) + assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send packets short of ingress drop send_packet(self, src_port_id, pkt, (pkts_num_trig_ingr_drp - @@ -1104,18 +1126,25 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here recv_counters_base = recv_counters - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) test_stage = 'after send packets short of ingress drop' - sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\txmit_counters {}\n\txmit_counters_base {}\n'.format(test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) + sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\t' + \ + 'xmit_counters {}\n\txmit_counters_base {}\n'.format( + test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), 'unexpectedly PFC counter not increase, {}'.format(test_stage) + assert(recv_counters[pg] > recv_counters_base[pg]), \ + 'unexpectedly PFC counter not increase, {}'.format(test_stage) # recv port no ingress 
drop for cntr in ingress_counters: - assert(recv_counters[cntr] == recv_counters_base[cntr]), 'unexpectedly RX drop counter increase, {}'.format(test_stage) + assert(recv_counters[cntr] == recv_counters_base[cntr]), \ + 'unexpectedly RX drop counter increase, {}'.format(test_stage) # xmit port no egress drop for cntr in egress_counters: - assert(xmit_counters[cntr] == xmit_counters_base[cntr]), 'unexpectedly TX drop counter increase, {}'.format(test_stage) + assert(xmit_counters[cntr] == xmit_counters_base[cntr]), \ + 'unexpectedly TX drop counter increase, {}'.format(test_stage) # send 1 packet to trigger ingress drop send_packet(self, src_port_id, pkt, 1 + 2 * margin) @@ -1124,13 +1153,18 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here recv_counters_base = recv_counters - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) test_stage = 'after send a few packets to trigger drop' - sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\txmit_counters {}\n\txmit_counters_base {}\n'.format(test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) + sys.stderr.write('{}:\n\trecv_counters {}\n\trecv_counters_base {}\n\t' + \ + 'xmit_counters {}\n\txmit_counters_base {}\n'.format( + test_stage, recv_counters, recv_counters_base, xmit_counters, xmit_counters_base)) # recv port pfc - assert(recv_counters[pg] > recv_counters_base[pg]), 'unexpectedly PFC counter not increase, {}'.format(test_stage) - + assert(recv_counters[pg] > recv_counters_base[pg]), \ + 'unexpectedly PFC counter not 
increase, {}'.format(test_stage) + # recv port ingress drop if platform_asic and platform_asic == "broadcom-dnx": logging.info ("On J2C+ don't support port level drop counters - so ignoring this step for now") else: @@ -1143,14 +1177,14 @@ def runTest(self): if '201811' not in sonic_version and 'mellanox' in asic_type: pg_dropped_cntrs = sai_thrift_read_pg_drop_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) logging.info("Dropped packet counters on port #{} :{} {} packets, current dscp: {}".format( src_port_id, pg_dropped_cntrs[dscp], pg_dropped_cntrs_old[dscp], dscp)) # Check that counters per lossless PG increased assert pg_dropped_cntrs[dscp] > pg_dropped_cntrs_old[dscp] if '201811' not in sonic_version and 'cisco-8000' in asic_type: pg_dropped_cntrs = sai_thrift_read_pg_drop_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) logging.info("Dropped packet counters on port #{} :{} {} packets, current dscp: {}".format( src_port_id, pg_dropped_cntrs[dscp], pg_dropped_cntrs_old[dscp], dscp)) # check that counters per lossless PG increased @@ -1162,13 +1196,13 @@ def runTest(self): assert pg_dropped_cntrs[i] == pg_dropped_cntrs_old[i] finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) class LosslessVoq(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -1257,9 +1291,12 @@ def runTest(self): print("actual dst_port_id: {}".format(dst_port_id), file=sys.stderr) # get a snapshot of counter values at recv and transmit ports - recv_counters_base1, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_1_id]) - recv_counters_base2, queue_counters = sai_thrift_read_port_counters(self.client, 
asic_type, port_list[src_port_2_id]) - xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters_base1, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_1_id]) + recv_counters_base2, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_2_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) # Add slight tolerance in threshold characterization to consider # the case that cpu puts packets in the egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed @@ -1268,7 +1305,7 @@ def runTest(self): else: margin = 2 - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) try: assert(fill_leakout_plus_one(self, src_port_1_id, dst_port_id, pkt, int(self.test_params['pg']), asic_type)) @@ -1292,9 +1329,12 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here - recv_counters1, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_1_id]) - recv_counters2, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_2_id]) - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters1, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_1_id]) + recv_counters2, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_2_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) # recv port no pfc pfc_txd = recv_counters1[pg] - recv_counters_base1[pg] assert
pfc_txd == 0, "Unexpected PFC TX {} on port {}".format(pfc_txd, src_port_1_id) @@ -1330,9 +1370,12 @@ def runTest(self): # queue counters value is not of our interest here recv_counters_base1 = recv_counters1 recv_counters_base2 = recv_counters2 - recv_counters1, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_1_id]) - recv_counters2, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_2_id]) - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters1, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_1_id]) + recv_counters2, queue_counters = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_2_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) # recv port pfc assert recv_counters1[pg] > recv_counters_base1[pg], "PFC TX counters did not increase for port {}".format(src_port_1_id) assert recv_counters2[pg] > recv_counters_base2[pg], "PFC TX counters did not increase for port {}".format(src_port_2_id) @@ -1348,7 +1391,7 @@ def runTest(self): assert diff == 0, "Unexpected egress drop {} on port {}".format(diff, dst_port_id) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) # Base class used for individual PTF runs used in the following: testPfcStormWithSharedHeadroomOccupancy @@ -1401,7 +1444,7 @@ class PtfFillBuffer(PfcStormTestWithSharedHeadroom): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) self.parse_test_params() pkts_num_trig_pfc = int(self.test_params['pkts_num_trig_pfc']) @@ -1423,14 +1466,15 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here
recv_counters_base, queue_counters = sai_thrift_read_port_counters( - self.client, self.asic_type, port_list[self.src_port_id]) + self.src_client, self.asic_type, port_list['src'][self.src_port_id] + ) logging.info("Disabling xmit ports: {}".format(self.dst_port_id)) - self.sai_thrift_port_tx_disable( - self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) xmit_counters_base, queue_counters = sai_thrift_read_port_counters( - self.client, self.asic_type, port_list[self.dst_port_id]) + self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id] + ) num_pkts = (pkts_num_trig_pfc + pkts_num_private_headrooom) // self.cell_occupancy logging.info("Send {} pkts to egress out of {}".format(num_pkts, self.dst_port_id)) # send packets to dst port 1, to cross into shared headrooom @@ -1441,9 +1485,9 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here recv_counters, queue_counters = sai_thrift_read_port_counters( - self.client, self.asic_type, port_list[self.src_port_id]) + self.src_client, self.asic_type, port_list['src'][self.src_port_id]) xmit_counters, queue_counters = sai_thrift_read_port_counters( - self.client, self.asic_type, port_list[self.dst_port_id]) + self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) logging.debug("Recv Counters: {}, Base: {}".format(recv_counters, recv_counters_base)) logging.debug("Xmit Counters: {}, Base: {}".format(xmit_counters, xmit_counters_base)) @@ -1462,25 +1506,29 @@ class PtfReleaseBuffer(PfcStormTestWithSharedHeadroom): def runTest(self): time.sleep(1) - switch_init(self.client) + switch_init(self.clients) self.parse_test_params() # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, self.asic_type, 
port_list[self.src_port_id]) + recv_counters_base, queue_counters = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_id] + ) - xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.dst_port_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id] + ) logging.info("Enable xmit ports: {}".format(self.dst_port_id)) - self.sai_thrift_port_tx_enable( - self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) # get new base counter values at recv ports - recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_id]) + recv_counters, queue_counters = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_id]) # no ingress drop for cntr in self.ingress_counters: assert(recv_counters[cntr] == recv_counters_base[cntr]) @@ -1490,8 +1538,10 @@ def runTest(self): time.sleep(30) # get the current snapshot of counter values at recv and transmit ports - recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_id]) - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.dst_port_id]) + recv_counters, queue_counters = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) logging.debug("Recv Counters: {}, Base: {}".format( recv_counters, recv_counters_base)) @@ -1512,10 +1562,9 @@ class PtfEnableDstPorts(PfcStormTestWithSharedHeadroom): def 
runTest(self): time.sleep(1) - switch_init(self.client) + switch_init(self.clients) self.parse_test_params() - self.sai_thrift_port_tx_enable( - self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) # This test looks to measure xon threshold (pg_reset_floor) @@ -1534,7 +1583,7 @@ def get_rx_port(self, src_port_id, pkt_dst_mac, dst_port_ip, src_port_ip, dst_po def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) last_pfc_counter = 0 recv_port_counters = [] transmit_port_counters = [] @@ -1578,7 +1627,9 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + recv_counters_base, _ = sai_thrift_read_port_counters( + self.src_client, asic_type, port_list['src'][src_port_id] + ) # The number of packets that will trek into the headroom space; # We observe in test that if the packets are sent to multiple destination ports, @@ -1666,8 +1717,7 @@ def runTest(self): step_id = 1 step_desc = 'disable TX for dst_port_id, dst_port_2_id, dst_port_3_id' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - self.sai_thrift_port_tx_disable(self.client, asic_type, [ - dst_port_id, dst_port_2_id, dst_port_3_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) try: ''' @@ -1692,8 +1742,8 @@ def runTest(self): step_desc = 'send packets to dst port 1, occupying the xon' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - xmit_counters_base, queue_counters = sai_thrift_read_port_counters( - self.client, asic_type, port_list[dst_port_id] + xmit_counters_base, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id] ) # Since there is variability in packet leakout in hwsku 
Arista-7050CX3-32S-D48C8 and @@ -1726,15 +1776,17 @@ def runTest(self): sys.stderr.write('send_packet(src_port_id, pkt, ({} + {} - {} - {}) // {})\n'.format(pkts_num_leak_out, pkts_num_trig_pfc, pkts_num_dismiss_pfc, hysteresis, cell_occupancy)) if check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_base, self, src_port_id, pkt, 40) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_id], TRANSMITTED_PKTS, + xmit_counters_base, self, src_port_id, pkt, 40) # send packets to dst port 2, occupying the shared buffer step_id += 1 step_desc = 'send packets to dst port 2, occupying the shared buffer' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - xmit_2_counters_base, queue_counters = sai_thrift_read_port_counters( - self.client, asic_type, port_list[dst_port_2_id] + xmit_2_counters_base, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_2_id] ) if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': send_packet( @@ -1759,15 +1811,15 @@ def runTest(self): sys.stderr.write('send_packet(src_port_id, pkt2, ({} + {} + {}) // {} + {} - 1)\n'.format(pkts_num_leak_out, pkts_num_dismiss_pfc, hysteresis, cell_occupancy, margin)) if check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_2_id], TRANSMITTED_PKTS, xmit_2_counters_base, self, src_port_id, pkt2, 40) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_2_id], TRANSMITTED_PKTS, + xmit_2_counters_base, self, src_port_id, pkt2, 40) # send 1 packet to dst port 3, triggering PFC step_id += 1 step_desc = 'send 1 packet to dst port 3, triggering PFC' sys.stderr.write('step 
{}: {}\n'.format(step_id, step_desc)) - xmit_3_counters_base, queue_counters = sai_thrift_read_port_counters( - self.client, asic_type, port_list[dst_port_3_id] - ) + xmit_3_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_3_id]) if hwsku == 'DellEMC-Z9332f-M-O16C64' or hwsku == 'DellEMC-Z9332f-O32': send_packet(self, src_port_id, pkt3, pkts_num_egr_mem + pkts_num_leak_out + 1) @@ -1780,16 +1832,20 @@ def runTest(self): sys.stderr.write('send_packet(src_port_id, pkt3, ({} + 1)\n'.format(pkts_num_leak_out)) if check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_3_id], TRANSMITTED_PKTS, xmit_3_counters_base, self, src_port_id, pkt3, 40) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_3_id], TRANSMITTED_PKTS, + xmit_3_counters_base, self, src_port_id, pkt3, 40) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) - xmit_2_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_2_id]) - xmit_3_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_3_id]) + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) + xmit_2_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_2_id]) + xmit_3_counters, _ = 
sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_3_id]) port_cnt_tbl = texttable.TextTable([''] + [port_counter_fields[idx] for idx in port_counter_indexes]) port_cnt_tbl.add_row(['recv_counters_base'] + [recv_counters_base[idx] for idx in port_counter_indexes]) @@ -1816,18 +1872,17 @@ def runTest(self): step_id += 1 step_desc = 'enable TX for dst_port_2_id, to drain off buffer in dst_port_2' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_2_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_2_id], last_port=False) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here recv_counters_base = recv_counters - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) - xmit_2_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_2_id]) - xmit_3_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_3_id]) - + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) + xmit_2_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_2_id]) + xmit_3_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_3_id]) port_cnt_tbl = texttable.TextTable([''] + [port_counter_fields[idx] for idx in port_counter_indexes]) port_cnt_tbl.add_row(['recv_counters_base'] + [recv_counters_base[idx] for idx in port_counter_indexes]) 
port_cnt_tbl.add_row(['recv_counters'] + [recv_counters[idx] for idx in port_counter_indexes]) @@ -1853,13 +1908,13 @@ def runTest(self): step_id += 1 step_desc = 'enable TX for dst_port_3_id, to drain off buffer in dst_port_3' sys.stderr.write('step {}: {}\n'.format(step_id, step_desc)) - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_3_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_3_id], last_port=False) # allow enough time for the dut to sync up the counter values in counters_db time.sleep(8) # get new base counter values at recv ports # queue counters value is not of our interest here - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) port_cnt_tbl = texttable.TextTable([''] + [port_counter_fields[idx] for idx in port_counter_indexes]) port_cnt_tbl.add_row(['recv_counters_base'] + [recv_counters_base[idx] for idx in port_counter_indexes]) @@ -1877,10 +1932,12 @@ def runTest(self): time.sleep(30) # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here - recv_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) - xmit_2_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_2_id]) - xmit_3_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_3_id]) + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) + xmit_2_counters, _ = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_2_id]) + xmit_3_counters, _ = 
sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_3_id]) port_cnt_tbl = texttable.TextTable([''] + [port_counter_fields[idx] for idx in port_counter_indexes]) port_cnt_tbl.add_row(['recv_counters_base'] + [recv_counters_base[idx] for idx in port_counter_indexes]) @@ -1893,7 +1950,7 @@ def runTest(self): port_cnt_tbl.add_row(['xmit_3_counters'] + [xmit_3_counters[idx] for idx in port_counter_indexes]) sys.stderr.write('{}\n'.format(port_cnt_tbl)) - # recv port no pfc + # recv port no pfc assert(recv_counters[pg] == recv_counters_base[pg]), 'unexpectedly trigger PFC for PG {} (counter: {}), at step {} {}'.format(pg, port_counter_fields[pg], step_id, step_desc) # recv port no ingress drop for cntr in ingress_counters: @@ -1905,15 +1962,14 @@ def runTest(self): assert(xmit_3_counters[cntr] == xmit_3_counters_base[cntr]), 'unexpectedly egress drop on xmit port 3 (counter: {}), at step {} {}'.format(port_counter_fields[cntr], step_id, step_desc) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [ - dst_port_id, dst_port_2_id, dst_port_3_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id, dst_port_2_id, dst_port_3_id]) class HdrmPoolSizeTest(sai_base_test.ThriftInterfaceDataPlane): def setUp(self): sai_base_test.ThriftInterfaceDataPlane.setUp(self) time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters self.dynamic_threshold = self.test_params.get('dynamic_threshold', False) @@ -1962,14 +2018,17 @@ def setUp(self): self.pkt_size_factor = 1 if self.pkts_num_trig_pfc: - print(("pkts num: leak_out: %d, trig_pfc: %d, hdrm_full: %d, hdrm_partial: %d, pkt_size %d" % (self.pkts_num_leak_out, - self.pkts_num_trig_pfc, self.pkts_num_hdrm_full, self.pkts_num_hdrm_partial, self.pkt_size)), file=sys.stderr) + print(("pkts num: leak_out: %d, trig_pfc: %d, hdrm_full: %d, hdrm_partial: %d, pkt_size %d" % ( + self.pkts_num_leak_out, self.pkts_num_trig_pfc, 
self.pkts_num_hdrm_full, + self.pkts_num_hdrm_partial, self.pkt_size)), file=sys.stderr) elif self.pkts_num_trig_pfc_shp: - print(("pkts num: leak_out: {}, trig_pfc: {}, hdrm_full: {}, hdrm_partial: {}, pkt_size {}".format(self.pkts_num_leak_out, - self.pkts_num_trig_pfc_shp, self.pkts_num_hdrm_full, self.pkts_num_hdrm_partial, self.pkt_size)), file=sys.stderr) + print(("pkts num: leak_out: {}, trig_pfc: {}, hdrm_full: {}, hdrm_partial: {}, pkt_size {}".format( + self.pkts_num_leak_out, self.pkts_num_trig_pfc_shp, self.pkts_num_hdrm_full, + self.pkts_num_hdrm_partial, self.pkt_size)), file=sys.stderr) # used only for headroom pool watermark - if all(key in self.test_params for key in ['hdrm_pool_wm_multiplier', 'buf_pool_roid', 'cell_size', 'max_headroom']): + if all(key in self.test_params for key in [ + 'hdrm_pool_wm_multiplier', 'buf_pool_roid', 'cell_size', 'max_headroom']): self.cell_size = int(self.test_params['cell_size']) self.wm_multiplier = self.test_params['hdrm_pool_wm_multiplier'] print("Wm multiplier: %d buf_pool_roid: %s" % ( @@ -2033,10 +2092,10 @@ def show_port_counter(self, asic_type, rx_base, tx_base, banner): port_cnt_tbl = texttable.TextTable([''] + [port_counter_fields[fieldIdx] for fieldIdx in port_counter_indexes]) for srcPortIdx, srcPortId in enumerate(self.src_port_ids): port_cnt_tbl.add_row(['base src_port{}_id{}'.format(srcPortIdx, srcPortId)] + [rx_base[srcPortIdx][fieldIdx] for fieldIdx in port_counter_indexes]) - rx_curr, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[srcPortId]) + rx_curr, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][srcPortId]) port_cnt_tbl.add_row([' src_port{}_id{}'.format(srcPortIdx, srcPortId)] + [rx_curr[fieldIdx] for fieldIdx in port_counter_indexes]) port_cnt_tbl.add_row(['base dst_port_id{}'.format(self.dst_port_id)] + [tx_base[fieldIdx] for fieldIdx in port_counter_indexes]) - tx_curr, _ = sai_thrift_read_port_counters(self.client, asic_type, 
port_list[self.dst_port_id]) + tx_curr, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][self.dst_port_id]) port_cnt_tbl.add_row([' dst_port_id{}'.format(self.dst_port_id)] + [tx_curr[fieldIdx] for fieldIdx in port_counter_indexes]) sys.stderr.write('{}\n{}\n'.format(banner, port_cnt_tbl)) @@ -2054,16 +2113,15 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters_bases = [sai_thrift_read_port_counters(self.client, self.asic_type, port_list[sid])[0] for sid in self.src_port_ids] - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.dst_port_id]) + recv_counters_bases = [sai_thrift_read_port_counters(self.src_client, self.asic_type, port_list['src'][sid])[0] for sid in self.src_port_ids] + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) # Pause egress of dut xmit port - self.sai_thrift_port_tx_disable( - self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) try: # send packets to leak out @@ -2130,8 +2188,10 @@ def runTest(self): ip_ttl=ttl) pkt_cnt = 0 - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) - while (recv_counters[sidx_dscp_pg_tuples[i][2]] == recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) and (pkt_cnt < 10): + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + while (recv_counters[sidx_dscp_pg_tuples[i][2]] == + 
recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) and (pkt_cnt < 10): send_packet( self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, 1) pkt_cnt += 1 @@ -2140,8 +2200,8 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) - + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) time.sleep(8) # wait pfc counter refresh self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, 'To trigger PFC, send {} pkt with DSCP {} PG {} from src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) @@ -2152,7 +2212,8 @@ def runTest(self): assert(recv_counters[sidx_dscp_pg_tuples[i][2]] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) print("%d packets for sid: %d, pg: %d to trigger pfc" % ( - pkt_cnt, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], sidx_dscp_pg_tuples[i][2] - 2), file=sys.stderr) + pkt_cnt, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], sidx_dscp_pg_tuples[i][2] - 2), + file=sys.stderr) sys.stderr.flush() print("PFC triggered", file=sys.stderr) @@ -2161,7 +2222,7 @@ def runTest(self): upper_bound = 2 * margin + 1 if self.wm_multiplier: hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark( - self.client, self.buf_pool_roid) + self.src_client, self.buf_pool_roid) print("Actual headroom pool watermark value to start: %d" % hdrm_pool_wm, file=sys.stderr) assert (hdrm_pool_wm <= (upper_bound * @@ -2193,7 +2254,8 @@ def runTest(self): self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, 'To fill headroom pool, send {} pkt with DSCP {} PG {} from src_port{} to dst_port'.format(pkt_cnt, 
sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) # assert no ingress drop for cntr in self.ingress_counters: # corner case: in previous step in which trigger PFC, a few packets were dropped, and dropping don't keep increasing constantaly. @@ -2206,7 +2268,7 @@ def runTest(self): wm_pkt_num += (self.pkts_num_hdrm_full if i != self.pgs_num - 1 else self.pkts_num_hdrm_partial) hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark( - self.client, self.buf_pool_roid) + self.src_client, self.buf_pool_roid) expected_wm = wm_pkt_num * self.cell_size * self.wm_multiplier upper_bound_wm = expected_wm + \ (upper_bound * self.cell_size * self.wm_multiplier) @@ -2233,18 +2295,18 @@ def runTest(self): self.show_port_counter(self.asic_type, recv_counters_bases, xmit_counters_base, 'To fill last PG and trigger ingress drop, send {} pkt with DSCP {} PG {} from src_port{} to dst_port'.format(pkt_cnt, sidx_dscp_pg_tuples[i][1], sidx_dscp_pg_tuples[i][2], sidx_dscp_pg_tuples[i][0])) - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) - + recv_counters, _ = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][self.src_port_ids[sidx_dscp_pg_tuples[i][0]]]) if platform_asic and platform_asic == "broadcom-dnx": logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") else: # assert ingress drop for cntr in self.ingress_counters: - assert( - recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) + assert(recv_counters[cntr] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][cntr]) # assert no egress drop 
at the dut xmit port - xmit_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.dst_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters( + self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) if platform_asic and platform_asic == "broadcom-dnx": logging.info("On J2C+ don't support port level drop counters - so ignoring this step for now") @@ -2255,7 +2317,7 @@ def runTest(self): print("pg hdrm filled", file=sys.stderr) if self.wm_multiplier: # assert hdrm pool wm still remains the same - hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark(self.client, self.buf_pool_roid) + hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark(self.src_client, self.buf_pool_roid) sys.stderr.write('After PG headroom filled, actual headroom pool watermark {}, upper_bound {}\n'.format(hdrm_pool_wm, upper_bound_wm)) if 'innovium' not in self.asic_type: assert(expected_wm <= hdrm_pool_wm) @@ -2263,20 +2325,19 @@ def runTest(self): # at this point headroom pool should be full. 
send few more packets to continue causing drops print("overflow headroom pool", file=sys.stderr) send_packet(self, self.src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, 10) - hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark(self.client, self.buf_pool_roid) + hdrm_pool_wm = sai_thrift_read_headroom_pool_watermark(self.src_client, self.buf_pool_roid) assert(hdrm_pool_wm <= self.max_headroom) sys.stderr.flush() finally: - self.sai_thrift_port_tx_enable( - self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) class SharedResSizeTest(sai_base_test.ThriftInterfaceDataPlane): def setUp(self): sai_base_test.ThriftInterfaceDataPlane.setUp(self) time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters self.testbed_type = self.test_params['testbed_type'] @@ -2369,12 +2430,14 @@ def runTest(self): self.cell_size >= self.shared_limit_bytes # get a snapshot of counter values at recv and transmit ports - recv_counters_bases = [sai_thrift_read_port_counters(self.client, self.asic_type, port_list[sid])[0] for sid in self.src_port_ids] - xmit_counters_bases = [sai_thrift_read_port_counters(self.client, self.asic_type, port_list[sid])[0] for sid in self.dst_port_ids] + recv_counters_bases = [sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][sid])[0] for sid in self.src_port_ids] + xmit_counters_bases = [sai_thrift_read_port_counters( + self.dst_client, self.asic_type, port_list['dst'][sid])[0] for sid in self.dst_port_ids] # Disable all dst ports uniq_dst_ports = list(set(self.dst_port_ids)) - self.sai_thrift_port_tx_disable(self.client, self.asic_type, uniq_dst_ports) + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, uniq_dst_ports) try: for i in range(len(self.src_port_ids)): @@ -2406,7 +2469,8 @@ def runTest(self): "Verifying XOFF hasn't been triggered yet on final iteration", file=sys.stderr) sys.stderr.flush() 
time.sleep(8) - recv_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[src_port_id])[0] + recv_counters = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][src_port_id])[0] xoff_txd = recv_counters[self.pg_cntr_indices[i]] - \ recv_counters_bases[i][self.pg_cntr_indices[i]] assert xoff_txd == 0, "XOFF triggered too early on final iteration, XOFF count is %d" % xoff_txd @@ -2428,14 +2492,17 @@ def runTest(self): "Verifying XOFF has now been triggered on final iteration", file=sys.stderr) sys.stderr.flush() time.sleep(8) - recv_counters = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[src_port_id])[0] + recv_counters = sai_thrift_read_port_counters( + self.src_client, self.asic_type, port_list['src'][src_port_id])[0] xoff_txd = recv_counters[self.pg_cntr_indices[i]] - \ recv_counters_bases[i][self.pg_cntr_indices[i]] assert xoff_txd > 0, "Failed to trigger XOFF on final iteration" # Verify no ingress/egress drops for all ports - recv_counters_list = [sai_thrift_read_port_counters(self.client, self.asic_type, port_list[sid])[0] for sid in self.src_port_ids] - xmit_counters_list = [sai_thrift_read_port_counters(self.client, self.asic_type, port_list[sid])[0] for sid in self.dst_port_ids] + recv_counters_list = [sai_thrift_read_port_counters(self.src_client, self.asic_type, port_list['src'][sid])[ + 0] for sid in self.src_port_ids] + xmit_counters_list = [sai_thrift_read_port_counters(self.dst_client, self.asic_type, port_list['dst'][sid])[ + 0] for sid in self.dst_port_ids] for i in range(len(self.src_port_ids)): for cntr in self.ingress_counters: drops = recv_counters_list[i][cntr] - \ @@ -2447,15 +2514,14 @@ def runTest(self): assert drops == 0, "Detected %d egress drops" % drops finally: - self.sai_thrift_port_tx_enable( - self.client, self.asic_type, uniq_dst_ports) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, uniq_dst_ports) # TODO: remove 
sai_thrift_clear_all_counters and change to use incremental counter values class DscpEcnSend(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -2473,19 +2539,21 @@ def runTest(self): limit = self.test_params['limit'] min_limit = self.test_params['min_limit'] cell_size = self.test_params['cell_size'] + asic_type = self.test_params['sonic_asic_type'] # get counter names to query ingress_counters, egress_counters = get_counter_names(sonic_version) # STOP PORT FUNCTION sched_prof_id = sai_thrift_create_scheduler_profile( - self.client, STOP_PORT_MAX_RATE) + self.src_client, STOP_PORT_MAX_RATE) attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) attr = sai_thrift_attribute_t( id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) - self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.dst_client.sai_thrift_set_port_attribute(port_list['dst'][dst_port_id], attr) # Clear Counters - sai_thrift_clear_all_counters(self.client) + sai_thrift_clear_all_counters(self.src_client, 'src') + sai_thrift_clear_all_counters(self.dst_client, 'dst') # send packets try: @@ -2509,12 +2577,13 @@ def runTest(self): # Read Counters print("DST port counters: ") - port_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_counters, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) print(port_counters) print(queue_counters) # Clear Counters - sai_thrift_clear_all_counters(self.client) + sai_thrift_clear_all_counters(self.src_client, 'src') + sai_thrift_clear_all_counters(self.dst_client, 'dst') # Set receiving socket buffers to some big value for p in list(self.dataplane.ports.values()): @@ -2523,12 +2592,12 @@ def runTest(self): # RELEASE PORT sched_prof_id = sai_thrift_create_scheduler_profile( - self.client, 
RELEASE_PORT_MAX_RATE) + self.src_client, RELEASE_PORT_MAX_RATE) attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) attr = sai_thrift_attribute_t( id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) - self.client.sai_thrift_set_port_attribute( - port_list[dst_port_id], attr) + self.dst_client.sai_thrift_set_port_attribute( + port_list['dst'][dst_port_id], attr) # if (ecn == 1) - capture and parse all incoming packets marked_cnt = 0 @@ -2567,7 +2636,7 @@ def runTest(self): time.sleep(5) # Read Counters print("DST port counters: ") - port_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_counters, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) print(port_counters) print(queue_counters) if (ecn == 0): @@ -2590,18 +2659,18 @@ def runTest(self): finally: # RELEASE PORT sched_prof_id = sai_thrift_create_scheduler_profile( - self.client, RELEASE_PORT_MAX_RATE) + self.src_client, RELEASE_PORT_MAX_RATE) attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) attr = sai_thrift_attribute_t( id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) - self.client.sai_thrift_set_port_attribute( - port_list[dst_port_id], attr) + self.dst_client.sai_thrift_set_port_attribute( + port_list['dst'][dst_port_id], attr) print("END OF TEST") class WRRtest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters ecn = int(self.test_params['ecn']) @@ -2615,9 +2684,9 @@ def runTest(self): src_port_mac = self.dataplane.get_mac(0, src_port_id) qos_remap_enable = bool(self.test_params.get('qos_remap_enable', False)) print("dst_port_id: %d, src_port_id: %d qos_remap_enable: %d" % - (dst_port_id, src_port_id, qos_remap_enable), file=sys.stderr) + (dst_port_id, src_port_id, qos_remap_enable)) print("dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % 
( - dst_port_mac, src_port_mac, src_port_ip, dst_port_ip), file=sys.stderr) + dst_port_mac, src_port_mac, src_port_ip, dst_port_ip)) asic_type = self.test_params['sonic_asic_type'] default_packet_length = 1500 exp_ip_id = 110 @@ -2647,7 +2716,8 @@ def runTest(self): # 46 5 5 # 48 6 6 prio_list = [3, 4, 8, 0, 5, 46, 48] - q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_0_num_of_pkts, queue_1_num_of_pkts, queue_2_num_of_pkts, queue_5_num_of_pkts, queue_6_num_of_pkts] + q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_0_num_of_pkts, + queue_1_num_of_pkts, queue_2_num_of_pkts, queue_5_num_of_pkts, queue_6_num_of_pkts] else: # When qos_remap is enabled, the map is as below # DSCP TC QUEUE @@ -2658,10 +2728,12 @@ def runTest(self): # 46 5 5 # 48 7 7 prio_list = [3, 4, 8, 0, 46, 48] - q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_0_num_of_pkts, queue_1_num_of_pkts, queue_5_num_of_pkts, queue_7_num_of_pkts] + q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_0_num_of_pkts, + queue_1_num_of_pkts, queue_5_num_of_pkts, queue_7_num_of_pkts] else: prio_list = [3, 4, 1, 0, 2, 5, 6] - q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_1_num_of_pkts, queue_0_num_of_pkts, queue_2_num_of_pkts, queue_5_num_of_pkts, queue_6_num_of_pkts] + q_pkt_cnt = [queue_3_num_of_pkts, queue_4_num_of_pkts, queue_1_num_of_pkts, + queue_0_num_of_pkts, queue_2_num_of_pkts, queue_5_num_of_pkts, queue_6_num_of_pkts] q_cnt_sum = sum(q_pkt_cnt) # Send packets to leak out pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac @@ -2690,12 +2762,12 @@ def runTest(self): ) print("actual dst_port_id: {}".format(dst_port_id), file=sys.stderr) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) send_packet(self, src_port_id, pkt, pkts_num_leak_out) # Get a snapshot of counter values - port_counters_base, queue_counters_base = 
sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_counters_base, queue_counters_base = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # Send packets to each queue based on priority/dscp field for prio, pkt_cnt in zip(prio_list, q_pkt_cnt): @@ -2716,7 +2788,7 @@ def runTest(self): p.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 41943040) # Release port - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) cnt = 0 pkts = [] @@ -2775,7 +2847,7 @@ def runTest(self): # Read counters print("DST port counters: ") - port_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + port_counters, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) print(list(map(operator.sub, queue_counters, queue_counters_base)), file=sys.stderr) @@ -2785,7 +2857,7 @@ def runTest(self): class LossyQueueTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -2852,8 +2924,8 @@ def runTest(self): # get a snapshot of counter values at recv and transmit ports # queue_counters value is not of our interest here - recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # add slight tolerance in threshold characterization to consider # the case that cpu puts packets in the 
egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed @@ -2866,13 +2938,14 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) try: # Since there is variability in packet leakout in hwsku Arista-7050CX3-32S-D48C8 and # Arista-7050CX3-32S-C32. Starting with zero pkts_num_leak_out and trying to find # actual leakout by sending packets and reading actual leakout from HW - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or \ + hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': pkts_num_leak_out = 0 if asic_type == 'cisco-8000': @@ -2889,8 +2962,11 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_egr_drp - 1 - margin) - if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + + if hwsku == 'Arista-7050CX3-32S-D48C8' or hwsku == 'Arista-7050CX3-32S-C32' or \ + hwsku == 'DellEMC-Z9332f-O32' or hwsku == 'DellEMC-Z9332f-M-O16C64': + xmit_counters, queue_counters = sai_thrift_read_port_counters( + self.dst_client, asic_type, port_list['dst'][dst_port_id]) actual_pkts_num_leak_out = xmit_counters[TRANSMITTED_PKTS] - xmit_counters_base[TRANSMITTED_PKTS] send_packet(self, src_port_id, pkt, actual_pkts_num_leak_out) @@ -2898,8 +2974,8 @@ def runTest(self): time.sleep(8) # get a snapshot of counter values at recv and transmit ports # queue counters 
value is not of our interest here - recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, queue_counters = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # recv port no pfc assert(recv_counters[pg] == recv_counters_base[pg]) # recv port no ingress drop @@ -2915,8 +2991,8 @@ def runTest(self): time.sleep(8) # get a snapshot of counter values at recv and transmit ports # queue counters value is not of our interest here - recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters, queue_counters = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # recv port no pfc assert(recv_counters[pg] == recv_counters_base[pg]) # recv port no ingress drop @@ -2934,14 +3010,14 @@ def runTest(self): assert(xmit_counters[cntr] > xmit_counters_base[cntr]) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) class LossyQueueVoqTest(sai_base_test.ThriftInterfaceDataPlane): def setUp(self): sai_base_test.ThriftInterfaceDataPlane.setUp(self) time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters self.dscp = int(self.test_params['dscp']) self.ecn = int(self.test_params['ecn']) @@ -2993,8 +3069,8 @@ def runTest(self): flow_1_udp = 2048 pkt = 
self._build_testing_pkt(flow_1_udp) - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.dst_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][self.dst_port_id]) # add slight tolerance in threshold characterization to consider # the case that npu puts packets in the egress queue after we pause the egress # or the leak out is simply less than expected as we have occasionally observed @@ -3003,7 +3079,7 @@ def runTest(self): else: margin = 2 - self.sai_thrift_port_tx_disable(self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) try: # send packets to begin egress drop on flow1, requires sending the "single" @@ -3014,8 +3090,8 @@ def runTest(self): send_packet(self, self.src_port_id, pkt, self.pkts_num_trig_egr_drp) time.sleep(2) # Verify egress drop - xmit_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.dst_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][self.dst_port_id]) for cntr in egress_counters: diff = xmit_counters[cntr] - xmit_counters_base[cntr] assert diff > 0, "Failed to cause TX drop on port {}".format(self.dst_port_id) @@ -3029,8 +3105,8 @@ def runTest(self): xmit_counters_base = xmit_counters send_packet(self, self.src_port_id, pkt2, 1) time.sleep(2) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.dst_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][self.dst_port_id]) drop_counts = [xmit_counters[cntr] - xmit_counters_base[cntr] for cntr in egress_counters] assert len(set(drop_counts)) == 1, \ "Egress drop counters were different at port {}, counts: {}".format(self.dst_port_id, drop_counts) @@ -3048,14 +3124,14 @@ def runTest(self): 
print("Did not find a second flow in mode '{}'".format(self.flow_config), file=sys.stderr) assert self.flow_config == "shared", "Failed to find a flow that uses a second queue despite being in mode '{}'".format(self.flow_config) # Cleanup for multi-flow test - self.sai_thrift_port_tx_enable(self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) time.sleep(2) # Test multi-flow with detected multi-flow udp ports - self.sai_thrift_port_tx_disable(self.client, self.asic_type, [self.dst_port_id]) - recv_counters_base, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.src_port_id]) - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, self.asic_type, [self.dst_port_id]) + recv_counters_base, _ = sai_thrift_read_port_counters(self.src_client, self.asic_type, + port_list['src'][self.src_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][self.dst_port_id]) assert fill_leakout_plus_one(self, self.src_port_id, self.dst_port_id, pkt, int(self.test_params['pg']), self.asic_type), \ "Failed to fill leakout on dest port {}".format(self.dst_port_id) @@ -3071,10 +3147,10 @@ def runTest(self): send_packet(self, self.src_port_id, pkt2, short_of_drop_npkts) # allow enough time for counters to update time.sleep(2) - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, - port_list[self.dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, self.asic_type, + port_list['src'][self.src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, + port_list['dst'][self.dst_port_id]) # recv port no pfc diff = 
recv_counters[self.pg] - recv_counters_base[self.pg] assert diff == 0, "Unexpected PFC frames {}".format(diff) @@ -3094,8 +3170,8 @@ def runTest(self): send_packet(self, self.src_port_id, pkt2, npkts) # allow enough time for counters to update time.sleep(2) - recv_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.src_port_id]) - xmit_counters, _ = sai_thrift_read_port_counters(self.client, self.asic_type, port_list[self.dst_port_id]) + recv_counters, _ = sai_thrift_read_port_counters(self.src_client, self.asic_type, port_list['src'][self.src_port_id]) + xmit_counters, _ = sai_thrift_read_port_counters(self.dst_client, self.asic_type, port_list['dst'][self.dst_port_id]) # recv port no pfc diff = recv_counters[self.pg] - recv_counters_base[self.pg] assert diff == 0, "Unexpected PFC frames {}".format(diff) @@ -3109,7 +3185,7 @@ def runTest(self): assert drops > 0, "Failed to detect egress drops ({})".format(drops) print("Successfully dropped {} packets".format(drops), file=sys.stderr) finally: - self.sai_thrift_port_tx_enable(self.client, self.asic_type, [self.dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, self.asic_type, [self.dst_port_id]) # pg shared pool applied to both lossy and lossless traffic @@ -3132,17 +3208,17 @@ def show_stats(self, banner, + ['Ing Pg{} Pkt'.format(pg)] + ['Ing Pg{} Share Wm'.format(pg)]) if sport_cntr == None: - sport_cntr, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + sport_cntr, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) if sport_pg_cntr == None: - sport_pg_cntr = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + sport_pg_cntr = sai_thrift_read_pg_counters(self.src_client, port_list['src'][src_port_id]) if sport_pg_share_wm == None: - sport_pg_share_wm = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[src_port_id]) + sport_pg_share_wm = 
sai_thrift_read_pg_shared_watermark(self.src_client, asic_type, port_list['src'][src_port_id]) if dport_cntr == None: - dport_cntr, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + dport_cntr, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) if dport_pg_cntr == None: - dport_pg_cntr = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) + dport_pg_cntr = sai_thrift_read_pg_counters(self.dst_client, port_list['dst'][dst_port_id]) if dport_pg_share_wm == None: - dport_pg_share_wm = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[dst_port_id]) + dport_pg_share_wm = sai_thrift_read_pg_shared_watermark(self.dst_client, asic_type, port_list['dst'][dst_port_id]) stats_tbl.add_row(['base src port'] + [sport_cntr_base[fieldIdx] for fieldIdx in port_counter_indexes] + [sport_pg_cntr_base[pg]] @@ -3163,7 +3239,7 @@ def show_stats(self, banner, def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -3228,18 +3304,18 @@ def runTest(self): "pkts_num_margin") else 2 # Get a snapshot of counter values - recv_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + recv_counters_base, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # For TH3, some packets stay in egress memory and doesn't show up in shared buffer or leakout if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) - pg_cntrs_base = sai_thrift_read_pg_counters(self.client, 
port_list[src_port_id]) - dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) - pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[src_port_id]) - dst_pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.client, asic_type, port_list[dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) + pg_cntrs_base = sai_thrift_read_pg_counters(self.src_client, port_list['src'][src_port_id]) + dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.dst_client, port_list['dst'][dst_port_id]) + pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.src_client, asic_type, port_list['src'][src_port_id]) + dst_pg_shared_wm_res_base = sai_thrift_read_pg_shared_watermark(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # send packets try: @@ -3249,7 +3325,7 @@ def runTest(self): if check_leackout_compensation_support(asic_type, hwsku): pkts_num_leak_out = 0 - xmit_counters_history, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + xmit_counters_history, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) pg_min_pkts_num = 0 # send packets to fill pg min but not trek into shared pool @@ -3269,12 +3345,12 @@ def runTest(self): time.sleep(8) if pg_min_pkts_num > 0 and check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) + dynamically_compensate_leakout(self.src_client, asic_type, sai_thrift_read_port_counters, port_list['dst'][dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) pg_cntrs = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) pg_shared_wm_res = sai_thrift_read_pg_shared_watermark( - 
self.client, asic_type, port_list[src_port_id]) + self.src_client, asic_type, port_list['src'][src_port_id]) print("Received packets: %d" % (pg_cntrs[pg] - pg_cntrs_base[pg]), file=sys.stderr) print("Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ( @@ -3324,19 +3400,19 @@ def runTest(self): print("pkts num to send: %d, total pkts: %d, pg shared: %d" % (pkts_num, expected_wm, total_shared), file=sys.stderr) - send_packet(self, src_port_id, pkt, pkts_num) + send_packet(self, src_port_id, pkt,int(pkts_num)) time.sleep(8) if pg_min_pkts_num == 0 and pkts_num <= 1 + margin and check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) + dynamically_compensate_leakout(self.src_client, asic_type, sai_thrift_read_port_counters, port_list['dst'][dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) # these counters are clear on read, ensure counter polling # is disabled before the test pg_shared_wm_res = sai_thrift_read_pg_shared_watermark( - self.client, asic_type, port_list[src_port_id]) + self.src_client, asic_type, port_list['src'][src_port_id]) pg_cntrs = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("Received packets: %d" % (pg_cntrs[pg] - pg_cntrs_base[pg]), file=sys.stderr) @@ -3366,13 +3442,12 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) time.sleep(8) pg_shared_wm_res = sai_thrift_read_pg_shared_watermark( - self.client, asic_type, port_list[src_port_id]) + self.src_client, asic_type, port_list['src'][src_port_id]) pg_cntrs = sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("Received packets: %d" % (pg_cntrs[pg] - pg_cntrs_base[pg]), file=sys.stderr) - 
self.show_stats('To overflow PG share pool, send {} pkt'.format(pkts_num), asic_type, pg, src_port_id, dst_port_id, ingress_counters, egress_counters, recv_counters_base, pg_cntrs_base, pg_shared_wm_res_base, @@ -3385,15 +3460,15 @@ def runTest(self): if platform_asic and platform_asic == "broadcom-dnx": print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( pkts_num, ((expected_wm + cell_occupancy) * (packet_length + internal_hdr_size)), pg_shared_wm_res[pg]), file=sys.stderr) - assert (expected_wm * (packet_length + internal_hdr_size) <= (expected_wm + margin + cell_occupancy) * (packet_length + internal_hdr_size)) - + assert (expected_wm * (packet_length + internal_hdr_size) <= ( + expected_wm + margin + cell_occupancy) * (packet_length + internal_hdr_size)) else: print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( pkts_num, ((expected_wm + cell_occupancy) * cell_size), pg_shared_wm_res[pg]), file=sys.stderr) - assert (expected_wm * cell_size <= pg_shared_wm_res[pg] <= (expected_wm + margin + cell_occupancy) * cell_size) - + assert (expected_wm * cell_size <= pg_shared_wm_res[pg] <= ( + expected_wm + margin + cell_occupancy) * cell_size) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) # pg headroom is a notion for lossless traffic only @@ -3401,7 +3476,7 @@ def runTest(self): class PGHeadroomWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp = int(self.test_params['dscp']) @@ -3470,9 +3545,9 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem = int(self.test_params['pkts_num_egr_mem']) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) - 
xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # send packets try: @@ -3492,10 +3567,9 @@ def runTest(self): time.sleep(8) if check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_base, self, src_port_id, pkt, 30) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, port_list['dst'][dst_port_id], TRANSMITTED_PKTS, xmit_counters_base, self, src_port_id, pkt, 30) - q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks( - self.client, port_list[src_port_id]) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.src_client, port_list['src'][src_port_id]) if platform_asic and platform_asic == "broadcom-dnx": logging.info ("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ "stat - so ignoring this step for now") @@ -3524,7 +3598,7 @@ def runTest(self): # these counters are clear on read, ensure counter polling # is disabled before the test q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) * cell_size * cell_occupancy, pg_headroom_wm_res[pg], ((expected_wm + margin) * cell_size * cell_occupancy)), file=sys.stderr) @@ -3544,7 +3618,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) time.sleep(8) q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("exceeded pkts num sent: %d" % 
(pkts_num), file=sys.stderr) print("lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) * cell_size * cell_occupancy, pg_headroom_wm_res[pg], @@ -3561,12 +3635,12 @@ def runTest(self): cell_occupancy <= pg_headroom_wm_res[pg]) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) class PGDropTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp=int(self.test_params['dscp']) @@ -3614,11 +3688,10 @@ def runTest(self): pass_iterations=0 assert iterations > 0, "Need at least 1 iteration" for test_i in range(iterations): - self.sai_thrift_port_tx_disable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) pg_dropped_cntrs_base=sai_thrift_read_pg_drop_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) # Send packets to trigger PFC print("Iteration {}/{}, sending {} packets to trigger PFC".format( @@ -3628,11 +3701,11 @@ def runTest(self): # Account for leakout if 'cisco-8000' in asic_type: queue_counters=sai_thrift_read_queue_occupancy( - self.client, dst_port_id) + self.dst_client, "dst", dst_port_id) occ_pkts=queue_counters[queue] // (packet_length + 24) leaked_pkts=pkts_num_trig_pfc - occ_pkts print("resending leaked packets {}".format( - leaked_pkts), file=sys.stderr) + leaked_pkts)) send_packet(self, src_port_id, pkt, leaked_pkts) # Trigger drop @@ -3642,7 +3715,7 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkt_inc) pg_dropped_cntrs=sai_thrift_read_pg_drop_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) pg_drops=pg_dropped_cntrs[pg] - pg_dropped_cntrs_base[pg] actual_num_trig_ingr_drp=pkts_num_trig_ingr_drp + \ @@ -3653,16 +3726,16 @@ def runTest(self): 
print("expected trig drop: {}, actual trig drop: {}, diff: {}".format( pkts_num_trig_ingr_drp, actual_num_trig_ingr_drp, ingr_drop_diff), file=sys.stderr) - self.sai_thrift_port_tx_enable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) print("pass iterations: {}, total iterations: {}, margin: {}".format( pass_iterations, iterations, margin), file=sys.stderr) assert pass_iterations >= int( - 0.75 * iterations), "Passed iterations {} insufficient to meet minimum required iterations {}".format(pass_iterations, int(0.75 * iterations)) + 0.75 * iterations), "Passed iterations {} insufficient to meet minimum required iterations {}".format( + pass_iterations, int(0.75 * iterations)) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) class QSharedWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): @@ -3684,18 +3757,20 @@ def show_stats(self, banner, asic_type, + ['Que{} Share Wm'.format(que)]) if sport_cntr == None: - sport_cntr, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + sport_cntr, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) if sport_pg_cntr == None: - sport_pg_cntr = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + sport_pg_cntr = sai_thrift_read_pg_counters(self.src_client, port_list['src'][src_port_id]) if None in [sport_pg_share_wm, sport_pg_headroom_wm, sport_que_share_wm]: - sport_que_share_wm, sport_pg_share_wm, sport_pg_headroom_wm = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + sport_que_share_wm, sport_pg_share_wm, sport_pg_headroom_wm = \ + sai_thrift_read_port_watermarks(self.src_client, port_list['src'][src_port_id]) if dport_cntr == None: - dport_cntr, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + dport_cntr, _ = 
sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) if dport_pg_cntr == None: - dport_pg_cntr = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) + dport_pg_cntr = sai_thrift_read_pg_counters(self.dst_client, port_list['dst'][dst_port_id]) if None in [dport_pg_share_wm, dport_pg_headroom_wm, dport_que_share_wm]: - dport_que_share_wm, dport_pg_share_wm, dport_pg_headroom_wm = sai_thrift_read_port_watermarks(self.client, port_list[dst_port_id]) + dport_que_share_wm, dport_pg_share_wm, dport_pg_headroom_wm = \ + sai_thrift_read_port_watermarks(self.src_client, port_list['dst'][dst_port_id]) stats_tbl.add_row(['base src port'] + [sport_cntr_base[fieldIdx] for fieldIdx in port_counter_indexes] @@ -3725,7 +3800,7 @@ def show_stats(self, banner, asic_type, def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters ingress_counters, egress_counters = get_counter_names(self.test_params['sonic_version']) @@ -3799,13 +3874,13 @@ def runTest(self): if 'pkts_num_egr_mem' in list(self.test_params.keys()): pkts_num_egr_mem=int(self.test_params['pkts_num_egr_mem']) - recv_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) - xmit_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) - pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) - dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[dst_port_id]) - q_wm_res_base, pg_shared_wm_res_base, pg_headroom_wm_res_base = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) - dst_q_wm_res_base, dst_pg_shared_wm_res_base, dst_pg_headroom_wm_res_base = sai_thrift_read_port_watermarks(self.client, port_list[dst_port_id]) + recv_counters_base, _ = sai_thrift_read_port_counters(self.src_client, asic_type, 
port_list['src'][src_port_id]) + xmit_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) + pg_cntrs_base = sai_thrift_read_pg_counters(self.src_client, port_list['src'][src_port_id]) + dst_pg_cntrs_base = sai_thrift_read_pg_counters(self.dst_client, port_list['dst'][dst_port_id]) + q_wm_res_base, pg_shared_wm_res_base, pg_headroom_wm_res_base = sai_thrift_read_port_watermarks(self.src_client, port_list['src'][src_port_id]) + dst_q_wm_res_base, dst_pg_shared_wm_res_base, dst_pg_headroom_wm_res_base = sai_thrift_read_port_watermarks(self.dst_client, port_list['dst'][dst_port_id]) # send packets try: @@ -3815,7 +3890,7 @@ def runTest(self): if check_leackout_compensation_support(asic_type, hwsku): pkts_num_leak_out = 0 - xmit_counters_history, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + xmit_counters_history, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) que_min_pkts_num = 0 # send packets to fill queue min but not trek into shared pool @@ -3833,12 +3908,14 @@ def runTest(self): time.sleep(8) if que_min_pkts_num > 0 and check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_id], TRANSMITTED_PKTS, + xmit_counters_history, self, src_port_id, pkt, 40) q_wm_res, pg_shared_wm_res, pg_headroom_wm_res=sai_thrift_read_port_watermarks( - self.client, port_list[dst_port_id]) + self.dst_client, port_list['dst'][dst_port_id]) pg_cntrs=sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("Init pkts 
num sent: %d, min: %d, actual watermark value to start: %d" % ( que_min_pkts_num, pkts_num_fill_min, q_wm_res[queue]), file=sys.stderr) print("Received packets: %d" % @@ -3856,7 +3933,11 @@ def runTest(self): elif 'cisco-8000' in asic_type: assert(q_wm_res[queue] <= (margin + 1) * cell_size) else: - assert(q_wm_res[queue] <= 1 * cell_size) + if platform_asic and platform_asic == "broadcom-dnx": + logging.info("On J2C+ don't support SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES " + \ + "stat - so ignoring this step for now") + else: + assert(q_wm_res[queue] <= 1 * cell_size) # send packet batch of fixed packet numbers to fill queue shared # first round sends only 1 packet @@ -3879,10 +3960,8 @@ def runTest(self): fragment=total_shared - expected_wm if 'cisco-8000' in asic_type: - self.sai_thrift_port_tx_disable( - self.client, asic_type, [dst_port_id]) - assert(fill_leakout_plus_one(self, src_port_id, - dst_port_id, pkt, queue, asic_type)) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) + assert(fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) pkts_total += pkts_num pkts_num=pkts_total - 1 @@ -3893,19 +3972,21 @@ def runTest(self): if 'cisco-8000' in asic_type: self.sai_thrift_port_tx_enable( - self.client, asic_type, [dst_port_id]) + self.dst_client, asic_type, [dst_port_id]) time.sleep(8) if que_min_pkts_num == 0 and pkts_num <= 1 + margin and check_leackout_compensation_support(asic_type, hwsku): - dynamically_compensate_leakout(self.client, asic_type, sai_thrift_read_port_counters, port_list[dst_port_id], TRANSMITTED_PKTS, xmit_counters_history, self, src_port_id, pkt, 40) + dynamically_compensate_leakout(self.dst_client, asic_type, sai_thrift_read_port_counters, + port_list['dst'][dst_port_id], TRANSMITTED_PKTS, + xmit_counters_history, self, src_port_id, pkt, 40) # these counters are clear on read, ensure counter polling # is disabled before the test q_wm_res, pg_shared_wm_res, 
pg_headroom_wm_res=sai_thrift_read_port_watermarks( - self.client, port_list[dst_port_id]) + self.dst_client, port_list['dst'][dst_port_id]) pg_cntrs=sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("Received packets: %d" % (pg_cntrs[queue] - pg_cntrs_base[queue]), file=sys.stderr) print("lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) @@ -3928,8 +4009,7 @@ def runTest(self): pkts_num=pkts_inc if 'cisco-8000' in asic_type: - self.sai_thrift_port_tx_disable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) assert(fill_leakout_plus_one(self, src_port_id, dst_port_id, pkt, queue, asic_type)) pkts_total += pkts_num @@ -3939,14 +4019,13 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num) if 'cisco-8000' in asic_type: - self.sai_thrift_port_tx_enable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) q_wm_res, pg_shared_wm_res, pg_headroom_wm_res=sai_thrift_read_port_watermarks( - self.client, port_list[dst_port_id]) + self.dst_client, port_list['dst'][dst_port_id]) pg_cntrs=sai_thrift_read_pg_counters( - self.client, port_list[src_port_id]) + self.src_client, port_list['src'][src_port_id]) print("Received packets: %d" % (pg_cntrs[queue] - pg_cntrs_base[queue]), file=sys.stderr) print("exceeded pkts num sent: %d, actual value: %d, lower bound: %d, upper bound: %d" % ( @@ -3969,7 +4048,7 @@ def runTest(self): assert(q_wm_res[queue] <= (expected_wm + margin) * cell_size) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) # TODO: buffer pool roid should be obtained via rpc calls # based on the pg or queue index @@ -3977,7 +4056,7 @@ def runTest(self): class 
BufferPoolWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): time.sleep(5) - switch_init(self.client) + switch_init(self.clients) # Parse input parameters dscp=int(self.test_params['dscp']) @@ -4010,7 +4089,7 @@ def runTest(self): if 'cisco-8000' in asic_type: # Some small amount of memory is always occupied buffer_pool_wm_base=sai_thrift_read_buffer_pool_watermark( - self.client, buf_pool_roid) + self.src_client, buf_pool_roid) # Prepare TCP packet data tos=dscp << 2 @@ -4062,13 +4141,13 @@ def runTest(self): # the impact caused by lossy traffic # # TH2 uses scheduler-based TX enable, this does not require sending packets to leak out - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) pkts_num_to_send += (pkts_num_leak_out + pkts_num_fill_min) send_packet(self, src_port_id, pkt, pkts_num_to_send) - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( - self.client, buf_pool_roid) - buffer_pool_wm_base + self.src_client, buf_pool_roid) - buffer_pool_wm_base print("Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ( (pkts_num_leak_out + pkts_num_fill_min), pkts_num_fill_min, buffer_pool_wm), file=sys.stderr) if pkts_num_fill_min: @@ -4102,8 +4181,7 @@ def runTest(self): print("pkts num to send: %d, total pkts: %d, shared: %d" % (pkts_num, expected_wm, total_shared), file=sys.stderr) - self.sai_thrift_port_tx_disable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) pkts_num_to_send += pkts_num if 'cisco-8000' in asic_type: assert(fill_leakout_plus_one(self, src_port_id, @@ -4111,11 +4189,10 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num_to_send - 1) else: send_packet(self, 
src_port_id, pkt, pkts_num_to_send) - self.sai_thrift_port_tx_enable( - self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( - self.client, buf_pool_roid) - buffer_pool_wm_base + self.src_client, buf_pool_roid) - buffer_pool_wm_base print("lower bound (-%d): %d, actual value: %d, upper bound (+%d): %d" % (lower_bound_margin, (expected_wm - lower_bound_margin) * cell_size, buffer_pool_wm, upper_bound_margin, (expected_wm + upper_bound_margin) * cell_size), file=sys.stderr) assert(buffer_pool_wm <= (expected_wm + \ @@ -4126,7 +4203,7 @@ def runTest(self): pkts_num=pkts_inc # overflow the shared pool - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) pkts_num_to_send += pkts_num if 'cisco-8000' in asic_type: assert(fill_leakout_plus_one(self, src_port_id, @@ -4135,10 +4212,10 @@ def runTest(self): else: send_packet(self, src_port_id, pkt, pkts_num_to_send) - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) time.sleep(8) buffer_pool_wm=sai_thrift_read_buffer_pool_watermark( - self.client, buf_pool_roid) - buffer_pool_wm_base + self.src_client, buf_pool_roid) - buffer_pool_wm_base print("exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % ( pkts_num, (expected_wm * cell_size), buffer_pool_wm), file=sys.stderr) assert(expected_wm == total_shared) @@ -4147,7 +4224,7 @@ def runTest(self): assert(buffer_pool_wm <= (expected_wm + extra_cap_margin) * cell_size) finally: - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) class PacketTransmit(sai_base_test.ThriftInterfaceDataPlane): @@ -4235,7 +4312,7 @@ def runTest(self): Traffic is ingressed 
from IPinIP tunnel(LAG port), and then being decaped at active tor, and then egress to server. Tx is disabled on the egress port to trigger PFC pause. """ - switch_init(self.client) + switch_init(self.clients) # Parse input parameters active_tor_mac = self.test_params['active_tor_mac'] @@ -4268,11 +4345,11 @@ def runTest(self): try: # Disable tx on EGRESS port so that headroom buffer cannot be free - self.sai_thrift_port_tx_disable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_disable(self.dst_client, asic_type, [dst_port_id]) # Make a snapshot of transmitted packets - tx_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + tx_counters_base, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) # Make a snapshot of received packets - rx_counters_base, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + rx_counters_base, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) if tunnel_traffic_test: # Build IPinIP packet for testing pkt = self._build_testing_ipinip_pkt(active_tor_mac=active_tor_mac, @@ -4297,22 +4374,24 @@ def runTest(self): send_packet(self, src_port_id, pkt, pkts_num_trig_pfc) time.sleep(8) # Read TX_OK again to calculate leaked packet number - tx_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[dst_port_id]) + tx_counters, _ = sai_thrift_read_port_counters(self.dst_client, asic_type, port_list['dst'][dst_port_id]) leaked_packet_number = tx_counters[TRANSMITTED_PKTS] - tx_counters_base[TRANSMITTED_PKTS] # Send packets to compensate the leaked packets send_packet(self, src_port_id, pkt, leaked_packet_number) time.sleep(8) # Read rx counter again. 
No PFC pause frame should be triggered - rx_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + rx_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) # Verify no pfc assert(rx_counters[pg] == rx_counters_base[pg]) rx_counters_base = rx_counters # Send some packets to trigger PFC send_packet(self, src_port_id, pkt, 1 + 2 * pkts_num_margin) time.sleep(8) - rx_counters, _ = sai_thrift_read_port_counters(self.client, asic_type, port_list[src_port_id]) + rx_counters, _ = sai_thrift_read_port_counters(self.src_client, asic_type, port_list['src'][src_port_id]) # Verify PFC pause frame is generated on expected PG assert(rx_counters[pg] > rx_counters_base[pg]) finally: # Enable tx on dest port - self.sai_thrift_port_tx_enable(self.client, asic_type, [dst_port_id]) + self.sai_thrift_port_tx_enable(self.dst_client, asic_type, [dst_port_id]) + + diff --git a/tests/saitests/py3/switch.py b/tests/saitests/py3/switch.py index d2c4dfe72dd..41c18655147 100644 --- a/tests/saitests/py3/switch.py +++ b/tests/saitests/py3/switch.py @@ -32,11 +32,12 @@ this_dir = os.path.dirname(os.path.abspath(__file__)) -switch_inited = 0 +switch_inited=0 +# All below are dictionary with key 'src' or 'dst' and val is value for that client port_list = {} -sai_port_list = [] -front_port_list = [] -table_attr_list = [] +sai_port_list = {} +front_port_list = {} +table_attr_list = {} router_mac = '00:77:66:55:44:00' rewrite_mac1 = '00:77:66:55:45:01' rewrite_mac2 = '00:77:66:55:46:01' @@ -48,37 +49,56 @@ RELEASE_PORT_MAX_RATE = 0 -def switch_init(client): +def switch_init(clients): global switch_inited if switch_inited: return - switch_attr_list = client.sai_thrift_get_switch_attribute() - attr_list = switch_attr_list.attr_list - for attribute in attr_list: - if attribute.id == SAI_SWITCH_ATTR_PORT_NUMBER: - print("max ports: " + attribute.value.u32) - elif attribute.id == SAI_SWITCH_ATTR_PORT_LIST: - for port_id 
in attribute.value.objlist.object_id_list: - attr_value = sai_thrift_attribute_value_t(booldata=1) - attr = sai_thrift_attribute_t( - id=SAI_PORT_ATTR_ADMIN_STATE, value=attr_value) - client.sai_thrift_set_port_attribute(port_id, attr) - sai_port_list.append(port_id) - else: - print("unknown switch attribute") + def _get_data_for_client(client, target): + _port_list = {} + _sai_port_list = [] + _front_port_list = [] + switch_attr_list = client.sai_thrift_get_switch_attribute() + attr_list = switch_attr_list.attr_list + for attribute in attr_list: + if attribute.id == SAI_SWITCH_ATTR_PORT_NUMBER: + print("max ports {}".format(attribute.value.u32), file=sys.stderr) + elif attribute.id == SAI_SWITCH_ATTR_PORT_LIST: + for port_id in attribute.value.objlist.object_id_list: + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_ADMIN_STATE, value=attr_value) + client.sai_thrift_set_port_attribute(port_id, attr) + _sai_port_list.append(port_id) + else: + print("unknown switch attribute", file=sys.stderr) + + # TOFIX in brcm sai: This causes the following error on td2 (a7050-qx-32s) + # ERR syncd: brcm_sai_set_switch_attribute:842 updating switch mac addr failed with error -2. 
+ attr_value = sai_thrift_attribute_value_t(mac='00:77:66:55:44:33') + attr = sai_thrift_attribute_t(id=SAI_SWITCH_ATTR_SRC_MAC_ADDRESS, value=attr_value) + client.sai_thrift_set_switch_attribute(attr) # wait till the port are up time.sleep(10) - thrift_attr = client.sai_thrift_get_port_list_by_front_port() - if thrift_attr.id == SAI_SWITCH_ATTR_PORT_LIST: - for port_id in thrift_attr.value.objlist.object_id_list: - front_port_list.append(port_id) - - for interface, front in interface_to_front_mapping.items(): - sai_port_id = client.sai_thrift_get_port_id_by_front_port(front) - port_list[int(interface)] = sai_port_id + thrift_attr = client.sai_thrift_get_port_list_by_front_port() + if thrift_attr.id == SAI_SWITCH_ATTR_PORT_LIST: + for port_id in thrift_attr.value.objlist.object_id_list: + _front_port_list.append(port_id) + + for interface, front in interface_to_front_mapping[target].items(): + sai_port_id = client.sai_thrift_get_port_id_by_front_port(front) + _port_list[int(interface)] = sai_port_id + return _port_list, _sai_port_list, _front_port_list + + port_list['src'], sai_port_list['src'], front_port_list['src'] = _get_data_for_client(clients['src'], 'src') + if 'dst' not in clients: + port_list['dst'] = port_list['src'] + sai_port_list['dst'] = sai_port_list['src'] + front_port_list['dst'] = front_port_list['src'] + else: + port_list['dst'], sai_port_list['dst'], front_port_list['dst'] = \ + _get_data_for_client(clients['dst'], 'dst') switch_inited = 1 @@ -660,8 +680,8 @@ def sai_thrift_create_pool_profile(client, pool_type, size, threshold_mode): return pool_id -def sai_thrift_clear_all_counters(client): - for port in sai_port_list: +def sai_thrift_clear_all_counters(client, target): + for port in sai_port_list[target]: queue_list = [] client.sai_thrift_clear_port_all_stats(port) port_attr_list = client.sai_thrift_get_port_attribute(port) @@ -677,7 +697,7 @@ def sai_thrift_clear_all_counters(client): client.sai_thrift_clear_queue_stats(queue, cnt_ids, 
len(cnt_ids)) -def sai_thrift_port_tx_disable(client, asic_type, port_ids): +def sai_thrift_port_tx_disable(client, asic_type, port_ids, target='dst'): if asic_type == 'mellanox': # Close DST port sched_prof_id = sai_thrift_create_scheduler_profile( @@ -692,10 +712,10 @@ def sai_thrift_port_tx_disable(client, asic_type, port_ids): id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) for port_id in port_ids: - client.sai_thrift_set_port_attribute(port_list[port_id], attr) + client.sai_thrift_set_port_attribute(port_list[target][port_id], attr) -def sai_thrift_port_tx_enable(client, asic_type, port_ids): +def sai_thrift_port_tx_enable(client, asic_type, port_ids, target='dst'): if asic_type == 'mellanox': # Release port sched_prof_id = sai_thrift_create_scheduler_profile( @@ -710,7 +730,7 @@ def sai_thrift_port_tx_enable(client, asic_type, port_ids): id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) for port_id in port_ids: - client.sai_thrift_set_port_attribute(port_list[port_id], attr) + client.sai_thrift_set_port_attribute(port_list[target][port_id], attr) def sai_thrift_read_port_counters(client, asic_type, port): @@ -900,9 +920,9 @@ def sai_thrift_read_headroom_pool_watermark(client, buffer_pool_id): return wm_vals[0] -def sai_thrift_read_queue_occupancy(client, port_id): +def sai_thrift_read_queue_occupancy(client, target, port_id): queue_list = [] - port_attr_list = client.sai_thrift_get_port_attribute(port_list[port_id]) + port_attr_list = client.sai_thrift_get_port_attribute(port_list[target][port_id]) attr_list = port_attr_list.attr_list for attribute in attr_list: if attribute.id == SAI_PORT_ATTR_QOS_QUEUE_LIST: diff --git a/tests/test_pretest.py b/tests/test_pretest.py index 13eb172a52c..8a3579c60a0 100644 --- a/tests/test_pretest.py +++ b/tests/test_pretest.py @@ -171,7 +171,8 @@ def test_disable_rsyslog_rate_limit(duthosts, enum_dut_hostname): def collect_dut_lossless_prio(dut): - config_facts = dut.config_facts(host=dut.hostname, 
source="running")['ansible_facts'] + dut_asic = dut.asic_instance() + config_facts = dut_asic.config_facts(host=dut.hostname, source="running")['ansible_facts'] if "PORT_QOS_MAP" not in config_facts.keys(): return [] @@ -189,7 +190,8 @@ def collect_dut_lossless_prio(dut): return result def collect_dut_all_prio(dut): - config_facts = dut.config_facts(host=dut.hostname, source="running")['ansible_facts'] + dut_asic = dut.asic_instance() + config_facts = dut_asic.config_facts(host=dut.hostname, source="running")['ansible_facts'] if "DSCP_TO_TC_MAP" not in config_facts.keys(): return []