Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion tests/common/plugins/conditional_mark/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -419,7 +419,7 @@ def find_longest_matches(nodeid, conditions):
for condition in conditions:
# condition is a dict which has only one item, so we use condition.keys()[0] to get its key.
if nodeid.startswith(list(condition.keys())[0]):
length = len(condition)
length = len(list(condition.keys())[0])
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

What is the purpose of this line change?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

condition is a dict with a single item, so its length will always be 1 irrespective of how many rules are present.

So the key length should be checked, not the length of dict, to find the longest prefix match key.

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Are you aware that this change will make the currently skipped cases run?

if length > max_length:
max_length = length
longest_matches = []
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1290,6 +1290,12 @@ qos/test_qos_sai.py::TestQosSai::testQosSaiDwrrWeightChange:
conditions:
- "asic_type in ['mellanox']"

qos/test_qos_sai.py::TestQosSai::testQosSaiFullMeshTrafficSanity:
skip:
reason: "Unsupported platform or testbed type."
conditions:
- "asic_type not in ['cisco-8000'] or topo_name not in ['ptf64']"

qos/test_qos_sai.py::TestQosSai::testQosSaiHeadroomPoolSize:
skip:
reason: "Headroom pool size not supported."
Expand Down
94 changes: 91 additions & 3 deletions tests/qos/qos_sai_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,13 +132,17 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}, relax=False):
"""
custom_options = " --disable-ipv6 --disable-vxlan --disable-geneve" \
" --disable-erspan --disable-mpls --disable-nvgre"
# Append a suffix to the logfile name if log_suffix is present in testParams
log_suffix = testParams.get("log_suffix", "")
logfile_suffix = "_{0}".format(log_suffix) if log_suffix else ""

ptf_runner(
ptfhost,
"saitests",
testCase,
platform_dir="ptftests",
params=testParams,
log_file="/tmp/{0}.log".format(testCase),
log_file="/tmp/{0}{1}.log".format(testCase, logfile_suffix), # Include suffix in the logfile name,
qlen=10000,
is_python3=True,
relax=relax,
Expand Down Expand Up @@ -789,9 +793,32 @@ def __buildPortSpeeds(self, config_facts):
port_speeds[attr['speed']].append(etp)
return port_speeds

@pytest.fixture(scope='class', autouse=False)
def configure_ip_on_ptf_intfs(self, ptfhost, get_src_dst_asic_and_duts, tbinfo):
    """Assign /31 peer addresses to the PTF eth interfaces for the test class.

    Only applies on the 'ptf64' topology with a cisco source DUT; on any other
    combination the fixture is a no-op. Addresses added during setup are
    removed again during teardown.
    """
    src_dut = get_src_dst_asic_and_duts['src_dut']
    mg_facts = src_dut.get_extended_minigraph_facts(tbinfo)

    # Only a cisco DUT on the ptf64 topology needs IPs configured on the PTF side.
    if tbinfo["topo"]["name"] != 'ptf64' or not is_cisco_device(src_dut):
        yield
        return

    ptf_index_of = mg_facts["minigraph_ptf_indices"]
    ip_interfaces = mg_facts['minigraph_interfaces']
    for intf in ip_interfaces:
        ptfhost.shell("ip addr add {}/31 dev eth{}".format(
            intf['peer_addr'], ptf_index_of[intf['attachto']])
        )
    yield
    # Teardown: remove exactly the addresses added above.
    for intf in ip_interfaces:
        ptfhost.shell("ip addr del {}/31 dev eth{}".format(
            intf['peer_addr'], ptf_index_of[intf['attachto']])
        )

@pytest.fixture(scope='class', autouse=True)
def dutConfig(
self, request, duthosts, get_src_dst_asic_and_duts,
self, request, duthosts, configure_ip_on_ptf_intfs, get_src_dst_asic_and_duts,
lower_tor_host, tbinfo, dualtor_ports_for_duts, dut_qos_maps): # noqa F811
"""
Build DUT host config pertaining to QoS SAI tests
Expand Down Expand Up @@ -910,7 +937,7 @@ def dutConfig(
testPortIds[src_dut_index][src_asic_index] = sorted(
list(testPortIps[src_dut_index][src_asic_index].keys()))

elif topo in self.SUPPORTED_T1_TOPOS:
elif topo in self.SUPPORTED_T1_TOPOS or (topo in self.SUPPORTED_PTF_TOPOS and is_cisco_device(src_dut)):
# T1 is supported only for 'single_asic' or 'single_dut_multi_asic'.
# So use src_dut as the dut
use_separated_upkink_dscp_tc_map = separated_dscp_to_tc_map_on_uplink(dut_qos_maps)
Expand Down Expand Up @@ -2312,6 +2339,67 @@ def populate_arp_entries(
ptfhost, testCase=saiQosTest, testParams=testParams
)

@pytest.fixture(scope="function", autouse=False)
def set_static_route_ptf64(self, dutConfig, get_src_dst_asic_and_duts, dutTestParams, enum_frontend_asic_index):
    """Install static /24 routes on the source ASIC for the ptf64 full-mesh test.

    For every src/dst test port a synthetic destination prefix is derived from
    its peer address and routed back via that peer address. Yields a map of
    port id -> {'original_ip': <peer addr>, 'generated_ip': <synthetic ip>};
    the routes are removed after the test.

    Skips unless the DUT is cisco-8000 on the ptf64 topology.
    """

    def _remap_ip(ip_addr, first_octet):
        # Derive a synthetic address: replace the first octet, copy the last
        # octet into the third position, and end the address with .1.
        parts = ip_addr.split('.')
        if len(parts) != 4:
            raise ValueError("Invalid IP address format")
        parts[0] = str(first_octet)
        parts[2] = parts[3]
        parts[3] = '1'
        return '.'.join(parts)

    def _build_ip_map(first_octet, *port_ip_tables):
        # Later tables overwrite earlier ones on duplicate port ids,
        # matching the original src-then-dst population order.
        ip_map = {}
        for table in port_ip_tables:
            for port_id, info in table.items():
                peer_ip = info['peer_addr']
                ip_map[port_id] = {
                    'original_ip': peer_ip,
                    'generated_ip': _remap_ip(peer_ip, first_octet),
                }
        return ip_map

    def _config_routes(install):
        action = "add" if install else "del"
        for entry in combined_ips_map.values():
            prefix = '.'.join(entry['generated_ip'].split('.')[:3])
            if enum_frontend_asic_index is None:
                src_asic.shell("config route {} prefix {}.0/24 nexthop {}".format(
                    action, prefix, entry['original_ip']))
            else:
                # Multi-asic: run the config command inside the asic's netns.
                src_asic.shell("ip netns exec asic{} config route {} prefix {}.0/24 nexthop {}".format(
                    enum_frontend_asic_index, action, prefix, entry['original_ip']))

    if dutTestParams["basicParams"]["sonic_asic_type"] != "cisco-8000":
        pytest.skip("Traffic sanity test is not supported")

    if dutTestParams["topo"] != "ptf64":
        pytest.skip("Test not supported in {} topology. Use ptf64 topo".format(dutTestParams["topo"]))

    asic_duts = get_src_dst_asic_and_duts
    src_asic = asic_duts['src_asic']
    src_port_ips = dutConfig["testPortIps"][asic_duts['src_dut_index']][asic_duts['src_asic_index']]
    dst_port_ips = dutConfig["testPortIps"][asic_duts['dst_dut_index']][asic_duts['dst_asic_index']]

    combined_ips_map = _build_ip_map(100, src_port_ips, dst_port_ips)

    _config_routes(True)
    yield combined_ips_map
    _config_routes(False)

@pytest.fixture(scope="function", autouse=False)
def skip_longlink(self, dutQosConfig):
portSpeedCableLength = dutQosConfig["portSpeedCableLength"]
Expand Down
87 changes: 87 additions & 0 deletions tests/qos/test_qos_sai.py
Original file line number Diff line number Diff line change
Expand Up @@ -2258,3 +2258,90 @@ def testQosSaiLossyQueueVoqMultiSrc(
ptfhost, testCase="sai_qos_tests.LossyQueueVoqMultiSrcTest",
testParams=testParams
)

def testQosSaiFullMeshTrafficSanity(
        self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
        get_src_dst_asic_and_duts, dut_qos_maps,  # noqa F811
        set_static_route_ptf64
):
    """
    Test QoS SAI full-mesh traffic sanity.

    Sends traffic from every source test port to every destination test port
    (via the static routes installed by set_static_route_ptf64), split into
    several PTF runs so a single run does not time out.

    Args:
        ptfhost (AnsibleHost): Packet Test Framework (PTF)
        dutTestParams (Fixture, dict): DUT host test params
        dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
            and test ports
        dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration
        get_src_dst_asic_and_duts (Fixture, dict): Src/dst dut and asic indices
        dut_qos_maps (Fixture, dict): QoS maps configured on the DUT
        set_static_route_ptf64 (Fixture, dict): port id -> {'original_ip', 'generated_ip'} route map
    Returns:
        None
    Raises:
        RunAnsibleModuleFail if ptf test fails
    """

    # Execution with a specific subset of dst ports; a separate log_suffix keeps
    # the log/pcap files of each run distinct.
    def run_test_for_dst_port(start, end):
        test_params = dict()
        test_params.update(dutTestParams["basicParams"])
        test_params.update({
            "testbed_type": dutTestParams["topo"],
            "all_src_port_id_to_ip": all_src_port_id_to_ip,
            "all_src_port_id_to_name": all_src_port_id_to_name,
            "all_dst_port_id_to_ip": {port_id: all_dst_port_id_to_ip[port_id] for port_id in range(start, end)},
            "all_dst_port_id_to_name": {port_id: all_dst_port_id_to_name[port_id] for port_id in range(start, end)},
            "dscp_to_q_map": dscp_to_q_map,
            # Add a log_suffix to have separate log and pcap file name
            "log_suffix": "_".join([str(port_id) for port_id in range(start, end)]),
            "hwsku": dutTestParams['hwsku']
        })

        self.runPtfTest(ptfhost, testCase="sai_qos_tests.FullMeshTrafficSanity", testParams=test_params)

    src_dut_index = get_src_dst_asic_and_duts['src_dut_index']
    dst_dut_index = get_src_dst_asic_and_duts['dst_dut_index']
    src_asic_index = get_src_dst_asic_and_duts['src_asic_index']
    dst_asic_index = get_src_dst_asic_and_duts['dst_asic_index']

    src_testPortIps = dutConfig["testPortIps"][src_dut_index][src_asic_index]
    dst_testPortIps = dutConfig["testPortIps"][dst_dut_index][dst_asic_index]

    # Fetch all port IDs and IPs
    all_src_port_id_to_ip = {port_id: src_testPortIps[port_id]['peer_addr'] for port_id in src_testPortIps.keys()}

    all_src_port_id_to_name = {
        port_id: dutConfig["dutInterfaces"][port_id]
        for port_id in all_src_port_id_to_ip.keys()
    }

    # Destination traffic targets the synthetic IPs routed by set_static_route_ptf64.
    all_dst_port_id_to_ip = {
        port_id: set_static_route_ptf64[port_id]['generated_ip']
        for port_id in dst_testPortIps.keys()
    }

    all_dst_port_id_to_name = {
        port_id: dutConfig["dutInterfaces"][port_id]
        for port_id in all_dst_port_id_to_ip.keys()
    }

    try:
        tc_to_q_map = dut_qos_maps['tc_to_queue_map']['AZURE']
        tc_to_dscp_map = {v: k for k, v in dut_qos_maps['dscp_to_tc_map']['AZURE'].items()}
    except KeyError:
        # Fixed message: name the maps the code actually reads, and add the
        # missing space between the two concatenated literals.
        pytest.skip(
            "Need both TC_TO_QUEUE_MAP and DSCP_TO_TC_MAP "
            "and key AZURE to run this test.")

    # Exclude TC 7 (network control) from the dscp->queue map under test.
    # NOTE(review): if the map keys are strings ('7'), `tc != 7` never matches —
    # confirm the key type delivered by dut_qos_maps.
    dscp_to_q_map = {tc_to_dscp_map[tc]: tc_to_q_map[tc] for tc in tc_to_dscp_map if tc != 7}

    # Define the number of splits
    # for the dst port list
    num_splits = 4

    # Get all keys and sort them
    all_keys = sorted(all_dst_port_id_to_ip.keys())

    # Calculate the split points.
    # NOTE(review): split points are key VALUES while the range bounds below mix
    # them with positions (0 and len(all_keys)) — this is only correct when port
    # ids are contiguous integers starting at 0; confirm for this topology.
    split_points = [all_keys[i * len(all_keys) // num_splits] for i in range(1, num_splits)]

    # Execute with one set of dst port at a time, avoids ptf run getting timed out
    for start, end in zip([0] + split_points, split_points + [len(all_keys)]):
        run_test_for_dst_port(start, end)
Loading