diff --git a/ansible/library/testing_port_ip_facts.py b/ansible/library/testing_port_ip_facts.py new file mode 100644 index 00000000000..a7c2fa71a9a --- /dev/null +++ b/ansible/library/testing_port_ip_facts.py @@ -0,0 +1,70 @@ +#!/usr/bin/python +import netaddr + +DOCUMENTATION = ''' +--- +module: testing_port_ip_facts +version_added: "1.1" +author: Wenda Ni (wenni@microsoft.com) +short_description: Retrieve bgp peer ip facts +description: + - Retrieve bgp peer ip for the ptf interfaces, indexed in testing_ports_id. The ips are to be used as the src or dst port ips in ptf-generated packets. +options: + testing_ports_id: + description: a sublist of ptf_interfaces. + required: true + dut_switch_ports: + description: a list + required: true + minigraph_bgp: + description: a list + required: true + minigraph_neighbors: + description: a map + required: true +''' + +EXAMPLES = ''' +Retrieve bgp peer ips +- name: Get testing port IPs + testing_port_ip_facts: + testing_ports_id: "{{ testing_ports_id }}" + dut_switch_ports: "{{ dut_switch_ports }}" + minigraph_bgp: "{{ minigraph_bgp }}" + minigraph_neighbors: "{{ minigraph_neighbors }}" + connection: local +''' + + +def main(): + module = AnsibleModule( + argument_spec=dict( + testing_ports_id=dict(required=True), + dut_switch_ports=dict(required=True), + minigraph_bgp=dict(required=True), + minigraph_neighbors=dict(required=True), + ), + supports_check_mode=True + ) + + m_args = module.params + testing_ports_id = m_args['testing_ports_id'] + dut_switch_ports = m_args['dut_switch_ports'] + minigraph_bgp = m_args['minigraph_bgp'] + minigraph_neighbors = m_args['minigraph_neighbors'] + + testing_ports_ip = {} + + for port_id in testing_ports_id: + for peer in minigraph_bgp: + if peer['name'] == minigraph_neighbors[dut_switch_ports[int(port_id)]]['name'] and netaddr.valid_ipv4(peer['addr']): + testing_ports_ip[port_id] = peer['addr'] + break + + module.exit_json(ansible_facts={'testing_ports_ip': testing_ports_ip}) + +from 
ansible.module_utils.basic import * +if __name__== "__main__": + main() + diff --git a/ansible/roles/test/files/brcm/64_interface_to_front_map.ini b/ansible/roles/test/files/brcm/64_interface_to_front_map.ini new file mode 100644 index 00000000000..5e37187d106 --- /dev/null +++ b/ansible/roles/test/files/brcm/64_interface_to_front_map.ini @@ -0,0 +1,65 @@ +# ptf host interface @ switch front port name +0@Ethernet0 +1@Ethernet1 +2@Ethernet2 +3@Ethernet3 +4@Ethernet4 +5@Ethernet5 +6@Ethernet6 +7@Ethernet7 +8@Ethernet8 +9@Ethernet9 +10@Ethernet10 +11@Ethernet11 +12@Ethernet12 +13@Ethernet13 +14@Ethernet14 +15@Ethernet15 +16@Ethernet16 +17@Ethernet17 +18@Ethernet18 +19@Ethernet19 +20@Ethernet20 +21@Ethernet21 +22@Ethernet22 +23@Ethernet23 +24@Ethernet24 +25@Ethernet25 +26@Ethernet26 +27@Ethernet27 +28@Ethernet28 +29@Ethernet29 +30@Ethernet30 +31@Ethernet31 +32@Ethernet32 +33@Ethernet33 +34@Ethernet34 +35@Ethernet35 +36@Ethernet36 +37@Ethernet37 +38@Ethernet38 +39@Ethernet39 +40@Ethernet40 +41@Ethernet41 +42@Ethernet42 +43@Ethernet43 +44@Ethernet44 +45@Ethernet45 +46@Ethernet46 +47@Ethernet47 +48@Ethernet48 +49@Ethernet49 +50@Ethernet50 +51@Ethernet51 +52@Ethernet52 +53@Ethernet53 +54@Ethernet54 +55@Ethernet55 +56@Ethernet56 +57@Ethernet57 +58@Ethernet58 +59@Ethernet59 +60@Ethernet60 +61@Ethernet61 +62@Ethernet62 +63@Ethernet63 diff --git a/ansible/roles/test/files/brcm/d108c8_interface_to_front_map.ini b/ansible/roles/test/files/brcm/d108c8_interface_to_front_map.ini new file mode 100644 index 00000000000..f43acaf4e3b --- /dev/null +++ b/ansible/roles/test/files/brcm/d108c8_interface_to_front_map.ini @@ -0,0 +1,121 @@ +# ptf host interface @ switch front port name +0@Ethernet0 +1@Ethernet2 +2@Ethernet4 +3@Ethernet6 +4@Ethernet8 +5@Ethernet10 +6@Ethernet12 +7@Ethernet14 +8@Ethernet16 +9@Ethernet18 +10@Ethernet20 +11@Ethernet22 +12@Ethernet24 +13@Ethernet26 +14@Ethernet28 +15@Ethernet30 +16@Ethernet32 +17@Ethernet34 +18@Ethernet36 +19@Ethernet38 +20@Ethernet40 
+21@Ethernet42 +22@Ethernet44 +23@Ethernet46 +24@Ethernet48 +25@Ethernet52 +26@Ethernet56 +27@Ethernet60 +28@Ethernet64 +29@Ethernet68 +30@Ethernet72 +31@Ethernet76 +32@Ethernet80 +33@Ethernet82 +34@Ethernet84 +35@Ethernet86 +36@Ethernet88 +37@Ethernet90 +38@Ethernet92 +39@Ethernet94 +40@Ethernet96 +41@Ethernet98 +42@Ethernet100 +43@Ethernet102 +44@Ethernet104 +45@Ethernet106 +46@Ethernet108 +47@Ethernet110 +48@Ethernet112 +49@Ethernet114 +50@Ethernet116 +51@Ethernet118 +52@Ethernet120 +53@Ethernet122 +54@Ethernet124 +55@Ethernet126 +56@Ethernet128 +57@Ethernet130 +58@Ethernet132 +59@Ethernet134 +60@Ethernet136 +61@Ethernet138 +62@Ethernet140 +63@Ethernet142 +64@Ethernet144 +65@Ethernet146 +66@Ethernet148 +67@Ethernet150 +68@Ethernet152 +69@Ethernet154 +70@Ethernet156 +71@Ethernet158 +72@Ethernet160 +73@Ethernet162 +74@Ethernet164 +75@Ethernet166 +76@Ethernet168 +77@Ethernet170 +78@Ethernet172 +79@Ethernet174 +80@Ethernet176 +81@Ethernet178 +82@Ethernet180 +83@Ethernet182 +84@Ethernet184 +85@Ethernet186 +86@Ethernet188 +87@Ethernet190 +88@Ethernet192 +89@Ethernet194 +90@Ethernet196 +91@Ethernet198 +92@Ethernet200 +93@Ethernet202 +94@Ethernet204 +95@Ethernet206 +96@Ethernet208 +97@Ethernet210 +98@Ethernet212 +99@Ethernet214 +100@Ethernet216 +101@Ethernet218 +102@Ethernet220 +103@Ethernet222 +104@Ethernet224 +105@Ethernet226 +106@Ethernet228 +107@Ethernet230 +108@Ethernet232 +109@Ethernet234 +110@Ethernet236 +111@Ethernet238 +112@Ethernet240 +113@Ethernet242 +114@Ethernet244 +115@Ethernet246 +116@Ethernet248 +117@Ethernet250 +118@Ethernet252 +119@Ethernet254 diff --git a/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini b/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini index 8ca7d1a774d..0db110025b8 100644 --- a/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini +++ b/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini @@ -1,33 +1,33 @@ # ptf host interface @ switch front port name -0@Ethernet1 -1@Ethernet2 
-2@Ethernet3 -3@Ethernet4 -4@Ethernet5 -5@Ethernet6 -6@Ethernet7 -7@Ethernet8 -8@Ethernet9 -9@Ethernet10 -10@Ethernet11 -11@Ethernet12 -12@Ethernet13 -13@Ethernet14 -14@Ethernet15 -15@Ethernet16 -16@Ethernet17 -17@Ethernet18 -18@Ethernet19 -19@Ethernet20 -20@Ethernet21 -21@Ethernet22 -22@Ethernet23 -23@Ethernet24 -24@Ethernet25 -25@Ethernet26 -26@Ethernet27 -27@Ethernet28 -28@Ethernet29 -29@Ethernet30 -30@Ethernet31 -31@Ethernet32 \ No newline at end of file +0@Ethernet0 +1@Ethernet4 +2@Ethernet8 +3@Ethernet12 +4@Ethernet16 +5@Ethernet20 +6@Ethernet24 +7@Ethernet28 +8@Ethernet32 +9@Ethernet36 +10@Ethernet40 +11@Ethernet44 +12@Ethernet48 +13@Ethernet52 +14@Ethernet56 +15@Ethernet60 +16@Ethernet64 +17@Ethernet68 +18@Ethernet72 +19@Ethernet76 +20@Ethernet80 +21@Ethernet84 +22@Ethernet88 +23@Ethernet92 +24@Ethernet96 +25@Ethernet100 +26@Ethernet104 +27@Ethernet108 +28@Ethernet112 +29@Ethernet116 +30@Ethernet120 +31@Ethernet124 diff --git a/ansible/roles/test/files/mlnx/packets_aging.py b/ansible/roles/test/files/mlnx/packets_aging.py new file mode 100755 index 00000000000..584962a3d26 --- /dev/null +++ b/ansible/roles/test/files/mlnx/packets_aging.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +''' +This file contains Python script to enable/disable packets aging in queues(buffers?). 
+''' + +import sys, errno +import os +import argparse +from python_sdk_api.sx_api import (SX_STATUS_SUCCESS, + sx_api_open, + sx_api_port_device_get, + sx_api_port_sll_set, + sx_api_port_sll_get, + sx_api_port_hll_set, + new_sx_port_attributes_t_arr, + new_uint32_t_p, + uint32_t_p_assign, + uint32_t_p_value, + sx_port_attributes_t_arr_getitem) + +parser = argparse.ArgumentParser(description='Toggle Mellanox-specific packet aging on egress queues') +parser.add_argument('command', choices=['enable', 'disable'], type=str, help='Enable/Disable packet aging') +args = parser.parse_args() + +# Open SDK +rc, handle = sx_api_open(None) +if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "Failed to open api handle.\nPlease check that SDK is running." + sys.exit(errno.EACCES) + +# Get list of ports +port_attributes_list = new_sx_port_attributes_t_arr(64) +port_cnt_p = new_uint32_t_p() +uint32_t_p_assign(port_cnt_p, 64) + +rc = sx_api_port_device_get(handle, 1 , 0, port_attributes_list, port_cnt_p) +if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_device_get." + sys.exit() +port_cnt = uint32_t_p_value(port_cnt_p) + +set_mode = False +if args.command == "enable": # enable packets aging + sll_time = 0x418937 + hll_time = 0x83127 + hll_stall = 7 + set_mode = True +else: + assert args.command == "disable" # disable packets aging + sll_time = 0xffffffffffffffff + hll_time = 0xffffffff + hll_stall = 0 + set_mode = True + +if set_mode: + rc = sx_api_port_sll_set(handle, sll_time) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_sll_set." + sys.exit() +else: + sll_p = new_uint64_t_p() + rc = sx_api_port_sll_get(handle, sll_p) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_sll_get." 
+ sys.exit() + else: + sll = uint64_t_p_value(sll_p) + print >> sys.stderr, ("sll_max_time=0x%X" % sll) + +for i in range(0, port_cnt): + port_attributes = sx_port_attributes_t_arr_getitem(port_attributes_list,i) + log_port = int(port_attributes.log_port) + if log_port < 0xFFFFF: # only physical ports + if set_mode: + rc = sx_api_port_hll_set(handle, log_port, hll_time, hll_stall) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_hll_set." + sys.exit() + else: + hll_max_time_p = new_uint32_t_p() + hll_stall_cnt_p = new_uint32_t_p() + rc = sx_api_port_hll_get(handle,log_port, hll_max_time_p, hll_stall_cnt_p) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_hll_set." + sys.exit() + else: + hll_max_time = uint32_t_p_value(hll_max_time_p) + hll_stall_cnt = uint32_t_p_value(hll_stall_cnt_p) + print >> sys.stderr, ("Port%d(Ethernet%d, logical:0x%X) hll_time:0x%X, hll_stall:0x%X" % + (port_attributes.port_mapping.module_port, (port_attributes.port_mapping.module_port * 4), + log_port, hll_max_time, hll_stall_cnt)) diff --git a/ansible/roles/test/files/saitests/sai_qos_tests.py b/ansible/roles/test/files/saitests/sai_qos_tests.py new file mode 100644 index 00000000000..6e7eb0dbb16 --- /dev/null +++ b/ansible/roles/test/files/saitests/sai_qos_tests.py @@ -0,0 +1,1885 @@ +""" +SONiC Dataplane Qos tests +""" + +import time +import logging +import ptf.packet as scapy +import socket +import ptf.dataplane as dataplane +import sai_base_test +import operator +import sys +from ptf.testutils import (ptf_ports, + simple_arp_packet, + send_packet, + simple_tcp_packet, + simple_qinq_tcp_packet) +from ptf.mask import Mask +from switch import (switch_init, + sai_thrift_create_scheduler_profile, + sai_thrift_clear_all_counters, + sai_thrift_read_port_counters, + sai_port_list, + port_list, + sai_thrift_read_port_watermarks, + sai_thrift_read_pg_counters, + sai_thrift_read_buffer_pool_watermark) +from 
switch_sai_thrift.ttypes import (sai_thrift_attribute_value_t, + sai_thrift_attribute_t) +from switch_sai_thrift.sai_headers import (SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, + SAI_PORT_ATTR_PKT_TX_ENABLE) + +# Counters +# The index number comes from the append order in sai_thrift_read_port_counters +EGRESS_DROP = 0 +INGRESS_DROP = 1 +PFC_PRIO_3 = 5 +PFC_PRIO_4 = 6 +TRANSMITTED_OCTETS = 10 +TRANSMITTED_PKTS = 11 +QUEUE_0 = 0 +QUEUE_1 = 1 +QUEUE_2 = 2 +QUEUE_3 = 3 +QUEUE_4 = 4 +QUEUE_5 = 5 +QUEUE_6 = 6 +PG_NUM = 8 +QUEUE_NUM = 8 + +# Constants +STOP_PORT_MAX_RATE = 1 +RELEASE_PORT_MAX_RATE = 0 +ECN_INDEX_IN_HEADER = 53 # Fits the ptf hex_dump_buffer() parse function +DSCP_INDEX_IN_HEADER = 52 # Fits the ptf hex_dump_buffer() parse function + + +class ARPpopulate(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + router_mac = self.test_params['router_mac'] + # ARP Populate + index = 0 + for port in ptf_ports(): + arpreq_pkt = simple_arp_packet( + eth_dst='ff:ff:ff:ff:ff:ff', + eth_src=self.dataplane.get_mac(port[0],port[1]), + arp_op=1, + ip_snd='10.0.0.%d' % (index * 2 + 1), + ip_tgt='10.0.0.%d' % (index * 2), + hw_snd=self.dataplane.get_mac(port[0], port[1]), + hw_tgt='00:00:00:00:00:00') + send_packet(self, port[1], arpreq_pkt) + index += 1 + +class ReleaseAllPorts(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + asic_type = self.test_params['sonic_asic_type'] + + if asic_type == 'mellanox': + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + + for port in sai_port_list: + self.client.sai_thrift_set_port_attribute(port, attr) + +# DSCP to 
queue mapping +class DscpMappingPB(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + router_mac = self.test_params['router_mac'] + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id) + print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip) + exp_ip_id = 101 + exp_ttl = 63 + + # Get a snapshot of counter values + # port_results is not of our interest here + port_results, queue_results_base = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + + # DSCP Mapping test + try: + for dscp in range(0, 64): + tos = (dscp << 2) + tos |= 1 + pkt = simple_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=exp_ttl + 1 if router_mac != '' else exp_ttl) + send_packet(self, src_port_id, pkt, 1) + print >> sys.stderr, "dscp: %d, calling send_packet()" % (tos >> 2) + + cnt = 0 + dscp_received = False + while not dscp_received: + result = self.dataplane.poll(device_number=0, port_number=dst_port_id, timeout=3) + if isinstance(result, self.dataplane.PollFailure): + self.fail("Expected packet was not received on port %d. 
Total received: %d.\n%s" % (dst_port_id, cnt, result.format())) + recv_pkt = scapy.Ether(result.packet) + cnt += 1 + + # Verify dscp flag + try: + if (recv_pkt.payload.tos == tos) and (recv_pkt.payload.src == src_port_ip) and (recv_pkt.payload.dst == dst_port_ip) and \ + (recv_pkt.payload.ttl == exp_ttl) and (recv_pkt.payload.id == exp_ip_id): + dscp_received = True + print >> sys.stderr, "dscp: %d, total received: %d" % (tos >> 2, cnt) + except AttributeError: + print >> sys.stderr, "dscp: %d, total received: %d, attribute error!" % (tos >> 2, cnt) + continue + + # Read Counters + port_results, queue_results = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + + print >> sys.stderr, map(operator.sub, queue_results, queue_results_base) + # According to SONiC configuration all dscp are classified to queue 1 except: + # dscp 8 -> queue 0 + # dscp 5 -> queue 2 + # dscp 3 -> queue 3 + # dscp 4 -> queue 4 + # dscp 46 -> queue 5 + # dscp 48 -> queue 6 + # So for the 64 pkts sent the mapping should be -> 58 queue 1, and 1 for queue0, queue2, queue3, queue4, queue5, and queue6 + # Check results + assert(queue_results[QUEUE_0] == 1 + queue_results_base[QUEUE_0]) + assert(queue_results[QUEUE_1] == 58 + queue_results_base[QUEUE_1]) + assert(queue_results[QUEUE_2] == 1 + queue_results_base[QUEUE_2]) + assert(queue_results[QUEUE_3] == 1 + queue_results_base[QUEUE_3]) + assert(queue_results[QUEUE_4] == 1 + queue_results_base[QUEUE_4]) + assert(queue_results[QUEUE_5] == 1 + queue_results_base[QUEUE_5]) + assert(queue_results[QUEUE_6] == 1 + queue_results_base[QUEUE_6]) + + finally: + print >> sys.stderr, "END OF TEST" + +# DOT1P to queue mapping +class Dot1pToQueueMapping(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = 
self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id) + print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip) + vlan_id = int(self.test_params['vlan_id']) + + exp_ip_id = 102 + exp_ttl = 63 + + # According to SONiC configuration dot1ps are classified as follows: + # dot1p 0 -> queue 0 + # dot1p 1 -> queue 6 + # dot1p 2 -> queue 5 + # dot1p 3 -> queue 3 + # dot1p 4 -> queue 4 + # dot1p 5 -> queue 2 + # dot1p 6 -> queue 1 + # dot1p 7 -> queue 0 + queue_dot1p_map = { + 0 : [0, 7], + 1 : [6], + 2 : [5], + 3 : [3], + 4 : [4], + 5 : [2], + 6 : [1] + } + print >> sys.stderr, queue_dot1p_map + + try: + for queue, dot1ps in queue_dot1p_map.items(): + port_results, queue_results_base = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + + # send pkts with dot1ps that map to the same queue + for dot1p in dot1ps: + # ecn marked + tos = 1 + # Note that vlan tag can be stripped by a switch. + # To embrace this situation, we assemble a q-in-q double-tagged packet, + # and write the dot1p info into both vlan tags so that + # when we receive the packet we do not need to make any assumption + # on whether the outer tag is stripped by the switch or not, or + # more importantly, we do not need to care about, as in the single-tagged + # case, whether the immediate payload is the vlan tag or the ip + # header to determine the valid fields for receive validation + # purpose. 
With a q-in-q packet, we are sure that the next layer of + # header in either switching behavior case is still a vlan tag + pkt = simple_qinq_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + dl_vlan_outer=vlan_id, + dl_vlan_pcp_outer=dot1p, + vlan_vid=vlan_id, + vlan_pcp=dot1p, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=exp_ttl + 1 if router_mac != '' else exp_ttl) + send_packet(self, src_port_id, pkt, 1) + print >> sys.stderr, "dot1p: %d, calling send_packet" % (dot1p) + + # validate queue counters increment by the correct pkt num + time.sleep(8) + port_results, queue_results = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + print >> sys.stderr, queue_results_base + print >> sys.stderr, queue_results + print >> sys.stderr, map(operator.sub, queue_results, queue_results_base) + for i in range(0, QUEUE_NUM): + if i == queue: + assert(queue_results[queue] == queue_results_base[queue] + len(dot1ps)) + else: + assert(queue_results[i] == queue_results_base[i]) + + # confirm that dot1p pkts sent are received + total_recv_cnt = 0 + dot1p_recv_cnt = 0 + while dot1p_recv_cnt < len(dot1ps): + result = self.dataplane.poll(device_number=0, port_number=dst_port_id, timeout=3) + if isinstance(result, self.dataplane.PollFailure): + self.fail("Expected packet was not received on port %d. Total received: %d.\n%s" % (dst_port_id, total_recv_cnt, result.format())) + recv_pkt = scapy.Ether(result.packet) + total_recv_cnt += 1 + + # verify dot1p priority + dot1p = dot1ps[dot1p_recv_cnt] + try: + if (recv_pkt.payload.prio == dot1p) and (recv_pkt.payload.vlan == vlan_id): + + dot1p_recv_cnt += 1 + print >> sys.stderr, "dot1p: %d, total received: %d" % (dot1p, total_recv_cnt) + + except AttributeError: + print >> sys.stderr, "dot1p: %d, total received: %d, attribute error!" 
% (dot1p, total_recv_cnt) + continue + + finally: + print >> sys.stderr, "END OF TEST" + +# DSCP to pg mapping +class DscpToPgMapping(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id) + print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip) + + exp_ip_id = 100 + exp_ttl = 63 + + # According to SONiC configuration all dscps are classified to pg 0 except: + # dscp 3 -> pg 3 + # dscp 4 -> pg 4 + # So for the 64 pkts sent the mapping should be -> 62 pg 0, 1 for pg 3, and 1 for pg 4 + lossy_dscps = range(0, 64) + lossy_dscps.remove(3) + lossy_dscps.remove(4) + pg_dscp_map = { + 3 : [3], + 4 : [4], + 0 : lossy_dscps + } + print >> sys.stderr, pg_dscp_map + + try: + for pg, dscps in pg_dscp_map.items(): + pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + + # send pkts with dscps that map to the same pg + for dscp in dscps: + tos = (dscp << 2) + tos |= 1 + pkt = simple_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=exp_ttl + 1 if router_mac != '' else exp_ttl) + send_packet(self, src_port_id, pkt, 1) + print >> sys.stderr, "dscp: %d, calling send_packet" % (tos >> 2) + + # validate pg counters increment by the correct pkt num + time.sleep(8) + 
pg_cntrs = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + print >> sys.stderr, pg_cntrs_base + print >> sys.stderr, pg_cntrs + print >> sys.stderr, map(operator.sub, pg_cntrs, pg_cntrs_base) + for i in range(0, PG_NUM): + if i == pg: + assert(pg_cntrs[pg] == pg_cntrs_base[pg] + len(dscps)) + else: + assert(pg_cntrs[i] == pg_cntrs_base[i]) + + # confirm that dscp pkts are received + total_recv_cnt = 0 + dscp_recv_cnt = 0 + while dscp_recv_cnt < len(dscps): + result = self.dataplane.poll(device_number=0, port_number=dst_port_id, timeout=3) + if isinstance(result, self.dataplane.PollFailure): + self.fail("Expected packet was not received on port %d. Total received: %d.\n%s" % (dst_port_id, total_recv_cnt, result.format())) + recv_pkt = scapy.Ether(result.packet) + total_recv_cnt += 1 + + # verify dscp flag + tos = dscps[dscp_recv_cnt] << 2 + tos |= 1 + try: + if (recv_pkt.payload.tos == tos) and (recv_pkt.payload.src == src_port_ip) and (recv_pkt.payload.dst == dst_port_ip) and \ + (recv_pkt.payload.ttl == exp_ttl) and (recv_pkt.payload.id == exp_ip_id): + + dscp_recv_cnt += 1 + print >> sys.stderr, "dscp: %d, total received: %d" % (tos >> 2, total_recv_cnt) + + except AttributeError: + print >> sys.stderr, "dscp: %d, total received: %d, attribute error!" 
% (tos >> 2, total_recv_cnt) + continue + + finally: + print >> sys.stderr, "END OF TEST" + +# DOT1P to pg mapping +class Dot1pToPgMapping(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id) + print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip) + vlan_id = int(self.test_params['vlan_id']) + + exp_ip_id = 103 + exp_ttl = 63 + + # According to SONiC configuration dot1ps are classified as follows: + # dot1p 0 -> pg 0 + # dot1p 1 -> pg 0 + # dot1p 2 -> pg 0 + # dot1p 3 -> pg 3 + # dot1p 4 -> pg 4 + # dot1p 5 -> pg 0 + # dot1p 6 -> pg 0 + # dot1p 7 -> pg 0 + pg_dot1p_map = { + 0 : [0, 1, 2, 5, 6, 7], + 3 : [3], + 4 : [4] + } + print >> sys.stderr, pg_dot1p_map + + try: + for pg, dot1ps in pg_dot1p_map.items(): + pg_cntrs_base = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + + # send pkts with dot1ps that map to the same pg + for dot1p in dot1ps: + # ecn marked + tos = 1 + # Note that vlan tag can be stripped by a switch. 
+ # To embrace this situation, we assemble a q-in-q double-tagged packet, + # and write the dot1p info into both vlan tags so that + # when we receive the packet we do not need to make any assumption + # on whether the outer tag is stripped by the switch or not, or + # more importantly, we do not need to care about, as in the single-tagged + # case, whether the immediate payload is the vlan tag or the ip + # header to determine the valid fields for receive validation + # purpose. With a q-in-q packet, we are sure that the next layer of + # header in either switching behavior case is still a vlan tag + pkt = simple_qinq_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + dl_vlan_outer=vlan_id, + dl_vlan_pcp_outer=dot1p, + vlan_vid=vlan_id, + vlan_pcp=dot1p, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=exp_ttl + 1 if router_mac != '' else exp_ttl) + send_packet(self, src_port_id, pkt, 1) + print >> sys.stderr, "dot1p: %d, calling send_packet" % (dot1p) + + # validate pg counters increment by the correct pkt num + time.sleep(8) + pg_cntrs = sai_thrift_read_pg_counters(self.client, port_list[src_port_id]) + print >> sys.stderr, pg_cntrs_base + print >> sys.stderr, pg_cntrs + print >> sys.stderr, map(operator.sub, pg_cntrs, pg_cntrs_base) + for i in range(0, PG_NUM): + if i == pg: + assert(pg_cntrs[pg] == pg_cntrs_base[pg] + len(dot1ps)) + else: + assert(pg_cntrs[i] == pg_cntrs_base[i]) + + # confirm that dot1p pkts sent are received + total_recv_cnt = 0 + dot1p_recv_cnt = 0 + while dot1p_recv_cnt < len(dot1ps): + result = self.dataplane.poll(device_number=0, port_number=dst_port_id, timeout=3) + if isinstance(result, self.dataplane.PollFailure): + self.fail("Expected packet was not received on port %d. 
Total received: %d.\n%s" % (dst_port_id, total_recv_cnt, result.format())) + recv_pkt = scapy.Ether(result.packet) + total_recv_cnt += 1 + + # verify dot1p priority + dot1p = dot1ps[dot1p_recv_cnt] + try: + if (recv_pkt.payload.prio == dot1p) and (recv_pkt.payload.vlan == vlan_id): + + dot1p_recv_cnt += 1 + print >> sys.stderr, "dot1p: %d, total received: %d" % (dot1p, total_recv_cnt) + + except AttributeError: + print >> sys.stderr, "dot1p: %d, total received: %d, attribute error!" % (dot1p, total_recv_cnt) + continue + + finally: + print >> sys.stderr, "END OF TEST" + +# This test is to measure the Xoff threshold, and buffer limit +class PFCtest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + pg = int(self.test_params['pg']) + 2 # The pfc counter index starts from index 2 in sai_thrift_read_port_counters + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + max_buffer_size = int(self.test_params['buffer_max_size']) + max_queue_size = int(self.test_params['queue_max_size']) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_trig_pfc = int(self.test_params['pkts_num_trig_pfc']) + pkts_num_trig_ingr_drp = int(self.test_params['pkts_num_trig_ingr_drp']) + + # Prepare TCP packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + 
ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # get a snapshot of counter values at recv and transmit ports + # queue_counters value is not of our interest here + recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # Add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 2 + + if asic_type == 'mellanox': + # Close DST port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + try: + # send packets short of triggering pfc + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_pfc - 1 - margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port no pfc + assert(recv_counters[pg] == recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress 
drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + # send 1 packet to trigger pfc + send_packet(self, src_port_id, pkt, 1 + 2 * margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters_base = recv_counters + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port pfc + assert(recv_counters[pg] > recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + # send packets short of ingress drop + send_packet(self, src_port_id, pkt, pkts_num_trig_ingr_drp - pkts_num_trig_pfc - 1 - 2 * margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters_base = recv_counters + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port pfc + assert(recv_counters[pg] > recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + # send 1 packet to trigger ingress drop + send_packet(self, src_port_id, pkt, 1 + 2 * margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv 
and transmit ports + # queue counters value is not of our interest here + recv_counters_base = recv_counters + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port pfc + assert(recv_counters[pg] > recv_counters_base[pg]) + # recv port ingress drop + assert(recv_counters[INGRESS_DROP] > recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +# This test looks to measure xon threshold (pg_reset_floor) +class PFCXonTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + last_pfc_counter = 0 + recv_port_counters = [] + transmit_port_counters = [] + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + max_buffer_size = int(self.test_params['buffer_max_size']) + pg = int(self.test_params['pg']) + 2 # The pfc counter index starts from index 2 in sai_thrift_read_port_counters + + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = 
int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + asic_type = self.test_params['sonic_asic_type'] + + tos = dscp << 2 + tos |= ecn + ttl = 64 + + # TODO: pass in dst_port_id and _ip as a list + dst_port_2_id = int(self.test_params['dst_port_2_id']) + dst_port_2_ip = self.test_params['dst_port_2_ip'] + dst_port_2_mac = self.dataplane.get_mac(0, dst_port_2_id) + dst_port_3_id = int(self.test_params['dst_port_3_id']) + dst_port_3_ip = self.test_params['dst_port_3_ip'] + dst_port_3_mac = self.dataplane.get_mac(0, dst_port_3_id) + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_trig_pfc = int(self.test_params['pkts_num_trig_pfc']) + pkts_num_dismiss_pfc = int(self.test_params['pkts_num_dismiss_pfc']) + default_packet_length = 64 + # get a snapshot of counter values at recv and transmit ports + # queue_counters value is not of our interest here + recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + xmit_2_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_2_id]) + xmit_3_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_3_id]) + # The number of packets that will trek into the headroom space; + # We observe in test that if the packets are sent to multiple destination ports, + # the ingress may not trigger PFC sharp at its boundary + margin = 1 + + if asic_type == 'mellanox': + # Stop function of dst xmit ports + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + 
self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + else: + # Pause egress of dut xmit ports + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + + try: + # send packets to dst port 0 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_pfc - pkts_num_dismiss_pfc) + # send packets to dst port 1 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_2_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_2_ip, + ip_tos=tos, + ip_ttl=ttl) + send_packet(self, src_port_id, pkt, pkts_num_leak_out + margin + pkts_num_dismiss_pfc - 1) + # send 1 packet to dst port 2 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_3_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_3_ip, + ip_tos=tos, + ip_ttl=ttl) + send_packet(self, src_port_id, pkt, pkts_num_leak_out + 1) + + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = 
sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + xmit_2_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_2_id]) + xmit_3_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_3_id]) + # recv port pfc + assert(recv_counters[pg] > recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + assert(xmit_2_counters[EGRESS_DROP] == xmit_2_counters_base[EGRESS_DROP]) + assert(xmit_3_counters[EGRESS_DROP] == xmit_3_counters_base[EGRESS_DROP]) + + if asic_type == 'mellanox': + # Release dst port 1 + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + else: + # Resume egress of dst port 1 + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters_base = recv_counters + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + xmit_2_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_2_id]) + xmit_3_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_3_id]) + 
# recv port pfc + assert(recv_counters[pg] > recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + assert(xmit_2_counters[EGRESS_DROP] == xmit_2_counters_base[EGRESS_DROP]) + assert(xmit_3_counters[EGRESS_DROP] == xmit_3_counters_base[EGRESS_DROP]) + + if asic_type == 'mellanox': + # Release dst port 2 + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + else: + # Resume egress of dst port 2 + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get new base counter values at recv ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + recv_counters_base = recv_counters + + time.sleep(30) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + xmit_2_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_2_id]) + xmit_3_counters, queue_counters = sai_thrift_read_port_counters(self.client, 
port_list[dst_port_3_id]) + # recv port no pfc + assert(recv_counters[pg] == recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + assert(xmit_2_counters[EGRESS_DROP] == xmit_2_counters_base[EGRESS_DROP]) + assert(xmit_3_counters[EGRESS_DROP] == xmit_3_counters_base[EGRESS_DROP]) + + finally: + if asic_type == 'mellanox': + # Release dst ports + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + else: + # Resume egress of dut xmit ports + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_3_id], attr) + +class HdrmPoolSizeTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + dscps = self.test_params['dscps'] + ecn = self.test_params['ecn'] + router_mac = self.test_params['router_mac'] + pgs = [pg + 2 for pg in self.test_params['pgs']] # The pfc counter index starts from index 2 in sai_thrift_read_port_counters + src_port_ids = self.test_params['src_port_ids'] + src_port_ips = self.test_params['src_port_ips'] + print >> sys.stderr, src_port_ips + sys.stderr.flush() + + dst_port_id = 
self.test_params['dst_port_id'] + dst_port_ip = self.test_params['dst_port_ip'] + pgs_num = self.test_params['pgs_num'] + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = self.test_params['pkts_num_leak_out'] + pkts_num_trig_pfc = self.test_params['pkts_num_trig_pfc'] + pkts_num_hdrm_full = self.test_params['pkts_num_hdrm_full'] + pkts_num_hdrm_partial = self.test_params['pkts_num_hdrm_partial'] + print >> sys.stderr, ("pkts num: leak_out: %d, trig_pfc: %d, hdrm_full: %d, hdrm_partial: %d" % (pkts_num_leak_out, pkts_num_trig_pfc, pkts_num_hdrm_full, pkts_num_hdrm_partial)) + sys.stderr.flush() + + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_macs = [self.dataplane.get_mac(0, ptid) for ptid in src_port_ids] + margin = 0 + sidx_dscp_pg_tuples = [(sidx, dscp, pgs[pgidx]) for sidx, sid in enumerate(src_port_ids) for pgidx, dscp in enumerate(dscps)] + assert(len(sidx_dscp_pg_tuples) >= pgs_num) + print >> sys.stderr, sidx_dscp_pg_tuples + sys.stderr.flush() + + # get a snapshot of counter values at recv and transmit ports + # queue_counters value is not of our interest here + recv_counters_bases = [sai_thrift_read_port_counters(self.client, port_list[sid])[0] for sid in src_port_ids] + xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + + # Pause egress of dut xmit port + if asic_type == 'mellanox': + # Close DST port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], 
attr) + + try: + # send packets to leak out + sidx = 0 + pkt = simple_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_macs[sidx], + ip_src=src_port_ips[sidx], + ip_dst=dst_port_ip, + ip_ttl=64) + send_packet(self, src_port_ids[sidx], pkt, pkts_num_leak_out) + + # send packets to all pgs to fill the service pool + # and trigger PFC on all pgs + for i in range(0, pgs_num): + # Prepare TCP packet data + tos = sidx_dscp_pg_tuples[i][1] << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_macs[sidx_dscp_pg_tuples[i][0]], + ip_src=src_port_ips[sidx_dscp_pg_tuples[i][0]], + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + send_packet(self, src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, pkts_num_trig_pfc) + + print >> sys.stderr, "Service pool almost filled" + sys.stderr.flush() + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + + for i in range(0, pgs_num): + # Prepare TCP packet data + tos = sidx_dscp_pg_tuples[i][1] << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_macs[sidx_dscp_pg_tuples[i][0]], + ip_src=src_port_ips[sidx_dscp_pg_tuples[i][0]], + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + pkt_cnt = 0 + + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + while (recv_counters[sidx_dscp_pg_tuples[i][2]] == recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) and (pkt_cnt < 10): + send_packet(self, src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, 1) + pkt_cnt += 1 + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + + # get a snapshot of counter 
values at recv and transmit ports + # queue_counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + + if pkt_cnt == 10: + sys.exit("Too many pkts needed to trigger pfc: %d" % (pkt_cnt)) + assert(recv_counters[sidx_dscp_pg_tuples[i][2]] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][sidx_dscp_pg_tuples[i][2]]) + print >> sys.stderr, "%d packets for sid: %d, pg: %d to trigger pfc" % (pkt_cnt, src_port_ids[sidx_dscp_pg_tuples[i][0]], sidx_dscp_pg_tuples[i][2] - 2) + sys.stderr.flush() + + print >> sys.stderr, "PFC triggered" + sys.stderr.flush() + + # send packets to all pgs to fill the headroom pool + for i in range(0, pgs_num): + # Prepare TCP packet data + tos = sidx_dscp_pg_tuples[i][1] << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_macs[sidx_dscp_pg_tuples[i][0]], + ip_src=src_port_ips[sidx_dscp_pg_tuples[i][0]], + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + + send_packet(self, src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, pkts_num_hdrm_full if i != pgs_num - 1 else pkts_num_hdrm_partial) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + # assert no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_bases[sidx_dscp_pg_tuples[i][0]][INGRESS_DROP]) + + print >> sys.stderr, "all but the last pg hdrms filled" + sys.stderr.flush() + + # last pg + i = pgs_num - 1 + # send 1 packet on last pg to trigger ingress drop + send_packet(self, src_port_ids[sidx_dscp_pg_tuples[i][0]], pkt, 1 + 2 * margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + recv_counters, 
queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_ids[sidx_dscp_pg_tuples[i][0]]]) + # assert ingress drop + assert(recv_counters[INGRESS_DROP] > recv_counters_bases[sidx_dscp_pg_tuples[i][0]][INGRESS_DROP]) + + # assert no egress drop at the dut xmit port + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + print >> sys.stderr, "pg hdrm filled" + sys.stderr.flush() + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dur xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +# TODO: remove sai_thrift_clear_all_counters and change to use incremental counter values +class DscpEcnSend(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + default_packet_length = 64 + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + num_of_pkts = self.test_params['num_of_pkts'] + limit = self.test_params['limit'] + min_limit = self.test_params['min_limit'] + cell_size = 
self.test_params['cell_size'] + + #STOP PORT FUNCTION + sched_prof_id=sai_thrift_create_scheduler_profile(self.client,STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # Clear Counters + sai_thrift_clear_all_counters(self.client) + + #send packets + try: + tos = dscp << 2 + tos |= ecn + ttl = 64 + for i in range(0, num_of_pkts): + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + send_packet(self, 0, pkt) + + leaking_pkt_number = 0 + for (rcv_port_number, pkt_str, pkt_time) in self.dataplane.packets(0, 1): + leaking_pkt_number += 1 + print "leaking packet %d" % leaking_pkt_number + + # Read Counters + print "DST port counters: " + port_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + print port_counters + print queue_counters + + # Clear Counters + sai_thrift_clear_all_counters(self.client) + + # Set receiving socket buffers to some big value + for p in self.dataplane.ports.values(): + p.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 41943040) + + # RELEASE PORT + sched_prof_id=sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + + # if (ecn == 1) - capture and parse all incoming packets + marked_cnt = 0 + not_marked_cnt = 0 + if (ecn == 1): + print "" + print "ECN capable packets generated, releasing dst_port and analyzing traffic -" + + cnt = 0 + pkts = [] + for i in xrange(num_of_pkts): + (rcv_device, rcv_port, rcv_pkt, pkt_time) = 
dp_poll(self, device_number=0, port_number=dst_port_id, timeout=0.2) + if rcv_pkt is not None: + cnt += 1 + pkts.append(rcv_pkt) + else: # Received less packets then expected + assert (cnt == num_of_pkts) + print " Received packets: " + str(cnt) + + for pkt_to_inspect in pkts: + pkt_str = hex_dump_buffer(pkt_to_inspect) + + # Count marked and not marked amount of packets + if ( (int(pkt_str[ECN_INDEX_IN_HEADER]) & 0x03) == 1 ): + not_marked_cnt += 1 + elif ( (int(pkt_str[ECN_INDEX_IN_HEADER]) & 0x03) == 3 ): + assert (not_marked_cnt == 0) + marked_cnt += 1 + + print " ECN non-marked pkts: " + str(not_marked_cnt) + print " ECN marked pkts: " + str(marked_cnt) + print "" + + time.sleep(5) + # Read Counters + print "DST port counters: " + port_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + print port_counters + print queue_counters + if (ecn == 0): + transmitted_data = port_counters[TRANSMITTED_PKTS] * 2 * cell_size #num_of_pkts*pkt_size_in_cells*cell_size + assert (port_counters[TRANSMITTED_OCTETS] <= limit * 1.05) + assert (transmitted_data >= min_limit) + assert (marked_cnt == 0) + elif (ecn == 1): + non_marked_data = not_marked_cnt * 2 * cell_size + assert (non_marked_data <= limit*1.05) + assert (non_marked_data >= limit*0.95) + assert (marked_cnt == (num_of_pkts - not_marked_cnt)) + assert (port_counters[EGRESS_DROP] == 0) + assert (port_counters[INGRESS_DROP] == 0) + + finally: + # RELEASE PORT + sched_prof_id=sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + print "END OF TEST" + +class WRRtest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + ecn = int(self.test_params['ecn']) + router_mac = 
self.test_params['router_mac'] + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id) + print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip) + asic_type = self.test_params['sonic_asic_type'] + default_packet_length = 1500 + exp_ip_id = 110 + queue_0_num_of_pkts = int(self.test_params['q0_num_of_pkts']) + queue_1_num_of_pkts = int(self.test_params['q1_num_of_pkts']) + queue_2_num_of_pkts = int(self.test_params['q2_num_of_pkts']) + queue_3_num_of_pkts = int(self.test_params['q3_num_of_pkts']) + queue_4_num_of_pkts = int(self.test_params['q4_num_of_pkts']) + queue_5_num_of_pkts = int(self.test_params['q5_num_of_pkts']) + queue_6_num_of_pkts = int(self.test_params['q6_num_of_pkts']) + limit = int(self.test_params['limit']) + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + + if asic_type == 'mellanox': + # Stop port function + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # Send packets to leak out + pkt = simple_tcp_packet(pktlen=64, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, 
+ ip_dst=dst_port_ip, + ip_ttl=64) + send_packet(self, src_port_id, pkt, pkts_num_leak_out) + + # Get a snapshot of counter values + port_counters_base, queue_counters_base = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + + # Send packets to each queue based on dscp field + dscp = 3 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_3_num_of_pkts) + + dscp = 4 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_4_num_of_pkts) + + dscp = 8 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_0_num_of_pkts) + + dscp = 0 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_1_num_of_pkts) + + dscp = 5 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_2_num_of_pkts) + + dscp = 46 + tos = dscp << 2 + tos |= 
ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_5_num_of_pkts) + + dscp = 48 + tos = dscp << 2 + tos |= ecn + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_id=exp_ip_id, + ip_ttl=64) + send_packet(self, src_port_id, pkt, queue_6_num_of_pkts) + + # Set receiving socket buffers to some big value + for p in self.dataplane.ports.values(): + p.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 41943040) + + # Release port + if asic_type == 'mellanox': + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + cnt = 0 + pkts = [] + recv_pkt = scapy.Ether() + + while recv_pkt: + received = self.dataplane.poll(device_number=0, port_number=dst_port_id, timeout=2) + if isinstance(received, self.dataplane.PollFailure): + recv_pkt = None + break + recv_pkt = scapy.Ether(received.packet) + + try: + if recv_pkt.payload.src == src_port_ip and recv_pkt.payload.dst == dst_port_ip and recv_pkt.payload.id == exp_ip_id: + cnt += 1 + pkts.append(recv_pkt) + except AttributeError: + continue + + queue_pkt_counters = [0] * 49 + queue_num_of_pkts = [0] * 49 + queue_num_of_pkts[8] = 
queue_0_num_of_pkts + queue_num_of_pkts[0] = queue_1_num_of_pkts + queue_num_of_pkts[5] = queue_2_num_of_pkts + queue_num_of_pkts[3] = queue_3_num_of_pkts + queue_num_of_pkts[4] = queue_4_num_of_pkts + queue_num_of_pkts[46] = queue_5_num_of_pkts + queue_num_of_pkts[48] = queue_6_num_of_pkts + total_pkts = 0 + + for pkt_to_inspect in pkts: + dscp_of_pkt = pkt_to_inspect.payload.tos >> 2 + total_pkts += 1 + + # Count packet ordering + + queue_pkt_counters[dscp_of_pkt] += 1 + if queue_pkt_counters[dscp_of_pkt] == queue_num_of_pkts[dscp_of_pkt]: + assert((queue_0_num_of_pkts + queue_1_num_of_pkts + queue_2_num_of_pkts + queue_3_num_of_pkts + queue_4_num_of_pkts + queue_5_num_of_pkts + queue_6_num_of_pkts) - total_pkts < limit) + + print >> sys.stderr, queue_pkt_counters + + # Read counters + print "DST port counters: " + port_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + print >> sys.stderr, map(operator.sub, queue_counters, queue_counters_base) + + # All packets sent should be received intact + assert(queue_0_num_of_pkts + queue_1_num_of_pkts + queue_2_num_of_pkts + queue_3_num_of_pkts + queue_4_num_of_pkts + queue_5_num_of_pkts + queue_6_num_of_pkts == total_pkts) + +class LossyQueueTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + pg = int(self.test_params['pg']) + 2 # The pfc counter index starts from index 2 in sai_thrift_read_port_counters + router_mac = self.test_params['router_mac'] + max_buffer_size = int(self.test_params['buffer_max_size']) + headroom_size = int(self.test_params['headroom_size']) + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + dst_port_2_id = int(self.test_params['dst_port_2_id']) + dst_port_2_ip = self.test_params['dst_port_2_ip'] + 
dst_port_2_mac = self.dataplane.get_mac(0, dst_port_2_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + asic_type = self.test_params['sonic_asic_type'] + + # prepare tcp packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_trig_egr_drp = int(self.test_params['pkts_num_trig_egr_drp']) + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # get a snapshot of counter values at recv and transmit ports + # queue_counters value is not of our interest here + recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 2 + + if asic_type == 'mellanox': + # Stop port function + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + try: 
+ # send packets short of triggering egress drop + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_egr_drp - 1 - margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port no pfc + assert(recv_counters[pg] == recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port no egress drop + assert(xmit_counters[EGRESS_DROP] == xmit_counters_base[EGRESS_DROP]) + + # send 1 packet to trigger egress drop + send_packet(self, src_port_id, pkt, 1 + 2 * margin) + # allow enough time for the dut to sync up the counter values in counters_db + time.sleep(8) + # get a snapshot of counter values at recv and transmit ports + # queue counters value is not of our interest here + recv_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id]) + xmit_counters, queue_counters = sai_thrift_read_port_counters(self.client, port_list[dst_port_id]) + # recv port no pfc + assert(recv_counters[pg] == recv_counters_base[pg]) + # recv port no ingress drop + assert(recv_counters[INGRESS_DROP] == recv_counters_base[INGRESS_DROP]) + # xmit port egress drop + assert(xmit_counters[EGRESS_DROP] > xmit_counters_base[EGRESS_DROP]) + + finally: + if asic_type == 'mellanox': + # Release ports + sched_prof_id=sai_thrift_create_scheduler_profile(self.client, RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], 
attr) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_2_id], attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +# pg shared pool applied to both lossy and lossless traffic +class PGSharedWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + pg = int(self.test_params['pg']) + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_fill_min = int(self.test_params['pkts_num_fill_min']) + pkts_num_fill_shared = int(self.test_params['pkts_num_fill_shared']) + cell_size = int(self.test_params['cell_size']) + + # Prepare TCP packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # Add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 2 + + if asic_type == 'mellanox': + # Close DST port + 
sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # send packets + try: + # send packets to fill pg min but not trek into shared pool + # so if pg min is zero, it directly treks into shared pool by 1 + # this is the case for lossy traffic + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + print >> sys.stderr, "Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ((pkts_num_leak_out + pkts_num_fill_min), pkts_num_fill_min, pg_shared_wm_res[pg]) + if pkts_num_fill_min: + assert(pg_shared_wm_res[pg] == 0) + else: + # on t1-lag, we found vm will keep sending control + # packets, this will cause the watermark to be 2 * 208 bytes + # as all lossy packets are now mapped to single pg 0 + # so we remove the strict equity check, and use upper bound + # check instead + assert(1 * cell_size <= pg_shared_wm_res[pg]) + assert(pg_shared_wm_res[pg] <= margin * cell_size) + + # send packet batch of fixed packet numbers to fill pg shared + # first round sends only 1 packet + expected_wm = 0 + total_shared = pkts_num_fill_shared - pkts_num_fill_min + pkts_inc = total_shared >> 2 + pkts_num = 1 + margin + while (expected_wm < total_shared): + expected_wm += pkts_num + if (expected_wm > total_shared): + pkts_num -= (expected_wm - total_shared) + expected_wm = total_shared + print >> 
sys.stderr, "pkts num to send: %d, total pkts: %d, pg shared: %d" % (pkts_num, expected_wm, total_shared) + + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + print >> sys.stderr, "lower bound: %d, actual value: %d, upper bound: %d" % (expected_wm * cell_size, pg_shared_wm_res[pg], (expected_wm + margin) * cell_size) + assert(pg_shared_wm_res[pg] <= (expected_wm + margin) * cell_size) + assert(expected_wm * cell_size <= pg_shared_wm_res[pg]) + + pkts_num = pkts_inc + + # overflow the shared pool + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + print >> sys.stderr, "exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % (pkts_num, (expected_wm * cell_size), pg_shared_wm_res[pg]) + assert(expected_wm == total_shared) + assert(expected_wm * cell_size <= pg_shared_wm_res[pg]) + assert(pg_shared_wm_res[pg] <= (expected_wm + margin) * cell_size) + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +# pg headroom is a notion for lossless traffic only +class PGHeadroomWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + 
dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + pg = int(self.test_params['pg']) + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_trig_pfc = int(self.test_params['pkts_num_trig_pfc']) + pkts_num_trig_ingr_drp = int(self.test_params['pkts_num_trig_ingr_drp']) + cell_size = int(self.test_params['cell_size']) + + # Prepare TCP packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # Add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 0 + + if asic_type == 'mellanox': + # Close DST port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + 
self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # send packets + try: + # send packets to trigger pfc but not trek into headroom + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_trig_pfc) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + assert(pg_headroom_wm_res[pg] == 0) + + # send packet batch of fixed packet numbers to fill pg headroom + # first round sends only 1 packet + expected_wm = 0 + total_hdrm = pkts_num_trig_ingr_drp - pkts_num_trig_pfc - 1 + pkts_inc = total_hdrm >> 2 + pkts_num = 1 + margin + while (expected_wm < total_hdrm): + expected_wm += pkts_num + if (expected_wm > total_hdrm): + pkts_num -= (expected_wm - total_hdrm) + expected_wm = total_hdrm + print >> sys.stderr, "pkts num to send: %d, total pkts: %d, pg headroom: %d" % (pkts_num, expected_wm, total_hdrm) + + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + print >> sys.stderr, "lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) * cell_size, pg_headroom_wm_res[pg], (expected_wm * cell_size)) + assert(pg_headroom_wm_res[pg] <= expected_wm * cell_size) + assert((expected_wm - margin) * cell_size <= pg_headroom_wm_res[pg]) + + pkts_num = pkts_inc + + # overflow the headroom + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[src_port_id]) + print >> sys.stderr, "exceeded pkts num sent: %d, actual value: %d, expected watermark: %d" % (pkts_num, pg_headroom_wm_res[pg], (expected_wm * cell_size)) + assert(expected_wm == total_hdrm) + assert(pg_headroom_wm_res[pg] == expected_wm * cell_size) + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = 
sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +class QSharedWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + queue = int(self.test_params['queue']) + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_fill_min = int(self.test_params['pkts_num_fill_min']) + pkts_num_trig_drp = int(self.test_params['pkts_num_trig_drp']) + cell_size = int(self.test_params['cell_size']) + + # Prepare TCP packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # Add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause 
the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 0 + + if asic_type == 'mellanox': + # Close DST port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # send packets + try: + # send packets to fill queue min but not trek into shared pool + # so if queue min is zero, it will directly trek into shared pool by 1 + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[dst_port_id]) + print >> sys.stderr, "Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ((pkts_num_leak_out + pkts_num_fill_min), pkts_num_fill_min, q_wm_res[queue]) + assert(q_wm_res[queue] == (0 if pkts_num_fill_min else (1 * cell_size))) + + # send packet batch of fixed packet numbers to fill queue shared + # first round sends only 1 packet + expected_wm = 0 + total_shared = pkts_num_trig_drp - pkts_num_fill_min - 1 + pkts_inc = total_shared >> 2 + pkts_num = 1 + margin + while (expected_wm < total_shared): + expected_wm += pkts_num + if (expected_wm > total_shared): + pkts_num -= (expected_wm - total_shared) + expected_wm = total_shared + print >> sys.stderr, "pkts num to send: %d, total pkts: %d, queue shared: %d" % (pkts_num, expected_wm, total_shared) + + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = 
sai_thrift_read_port_watermarks(self.client, port_list[dst_port_id]) + print >> sys.stderr, "lower bound: %d, actual value: %d, upper bound: %d" % ((expected_wm - margin) * cell_size, q_wm_res[queue], (expected_wm * cell_size)) + assert(q_wm_res[queue] <= expected_wm * cell_size) + assert((expected_wm - margin) * cell_size <= q_wm_res[queue]) + + pkts_num = pkts_inc + + # overflow the shared pool + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + q_wm_res, pg_shared_wm_res, pg_headroom_wm_res = sai_thrift_read_port_watermarks(self.client, port_list[dst_port_id]) + print >> sys.stderr, "exceeded pkts num sent: %d, actual value: %d, expected watermark: %d" % (pkts_num, q_wm_res[queue], (expected_wm * cell_size)) + assert(expected_wm == total_shared) + assert(q_wm_res[queue] == expected_wm * cell_size) + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + +# TODO: buffer pool roid should be obtained via rpc calls +# based on the pg or queue index +# rather than fed in as test parameters due to the lack in SAI implement +class BufferPoolWatermarkTest(sai_base_test.ThriftInterfaceDataPlane): + def runTest(self): + time.sleep(5) + switch_init(self.client) + + # Parse input parameters + dscp = int(self.test_params['dscp']) + ecn = int(self.test_params['ecn']) + router_mac = self.test_params['router_mac'] + print >> sys.stderr, "router_mac: %s" % (router_mac) + pg = self.test_params['pg'] 
+ queue = self.test_params['queue'] + print >> sys.stderr, "pg: %s, queue: %s, buffer pool type: %s" % (pg, queue, 'egress' if not pg else 'ingress') + dst_port_id = int(self.test_params['dst_port_id']) + dst_port_ip = self.test_params['dst_port_ip'] + dst_port_mac = self.dataplane.get_mac(0, dst_port_id) + src_port_id = int(self.test_params['src_port_id']) + src_port_ip = self.test_params['src_port_ip'] + src_port_mac = self.dataplane.get_mac(0, src_port_id) + + asic_type = self.test_params['sonic_asic_type'] + pkts_num_leak_out = int(self.test_params['pkts_num_leak_out']) + pkts_num_fill_min = int(self.test_params['pkts_num_fill_min']) + pkts_num_fill_shared = int(self.test_params['pkts_num_fill_shared']) + cell_size = int(self.test_params['cell_size']) + + print >> sys.stderr, "buf_pool_roid: %s" % (self.test_params['buf_pool_roid']) + buf_pool_roid=int(self.test_params['buf_pool_roid'], 0) + print >> sys.stderr, "buf_pool_roid: 0x%lx" % (buf_pool_roid) + + # Prepare TCP packet data + tos = dscp << 2 + tos |= ecn + ttl = 64 + default_packet_length = 64 + pkt = simple_tcp_packet(pktlen=default_packet_length, + eth_dst=router_mac if router_mac != '' else dst_port_mac, + eth_src=src_port_mac, + ip_src=src_port_ip, + ip_dst=dst_port_ip, + ip_tos=tos, + ip_ttl=ttl) + # Add slight tolerance in threshold characterization to consider + # the case that cpu puts packets in the egress queue after we pause the egress + # or the leak out is simply less than expected as we have occasionally observed + margin = 2 + + if asic_type == 'mellanox': + # Close DST port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client, STOP_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + else: + # Pause egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=0) + attr 
= sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) + + # send packets + try: + # send packets to fill min but not trek into shared pool + # so if min is zero, it directly treks into shared pool by 1 + # this is the case for lossy traffic at ingress and lossless traffic at egress (on td2) + # Because lossy and lossless traffic use the same pool at ingress, even if + # lossless traffic has pg min not equal to zero, we still need to consider + # the impact caused by lossy traffic + send_packet(self, src_port_id, pkt, pkts_num_leak_out + pkts_num_fill_min) + time.sleep(8) + buffer_pool_wm = sai_thrift_read_buffer_pool_watermark(self.client, buf_pool_roid) + print >> sys.stderr, "Init pkts num sent: %d, min: %d, actual watermark value to start: %d" % ((pkts_num_leak_out + pkts_num_fill_min), pkts_num_fill_min, buffer_pool_wm) + if pkts_num_fill_min: + assert(buffer_pool_wm <= margin * cell_size) + else: + # on t1-lag, we found vm will keep sending control + # packets, this will cause the watermark to be 2 * 208 bytes + # as all lossy packets are now mapped to single pg 0 + # so we remove the strict equity check, and use upper bound + # check instead + assert(1 * cell_size <= buffer_pool_wm) + assert(buffer_pool_wm <= margin * cell_size) + + # send packet batch of fixed packet numbers to fill shared + # first round sends only 1 packet + expected_wm = 0 + total_shared = pkts_num_fill_shared - pkts_num_fill_min + pkts_inc = total_shared >> 2 + pkts_num = 1 + margin + while (expected_wm < total_shared): + expected_wm += pkts_num + if (expected_wm > total_shared): + pkts_num -= (expected_wm - total_shared) + expected_wm = total_shared + print >> sys.stderr, "pkts num to send: %d, total pkts: %d, shared: %d" % (pkts_num, expected_wm, total_shared) + + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + buffer_pool_wm = 
sai_thrift_read_buffer_pool_watermark(self.client, buf_pool_roid) + print >> sys.stderr, "lower bound: %d, actual value: %d, upper bound: %d" % (expected_wm * cell_size, buffer_pool_wm, (expected_wm + margin) * cell_size) + assert(buffer_pool_wm <= (expected_wm + margin) * cell_size) + assert(expected_wm * cell_size <= buffer_pool_wm) + + pkts_num = pkts_inc + + # overflow the shared pool + send_packet(self, src_port_id, pkt, pkts_num) + time.sleep(8) + buffer_pool_wm = sai_thrift_read_buffer_pool_watermark(self.client, buf_pool_roid) + print >> sys.stderr, "exceeded pkts num sent: %d, expected watermark: %d, actual value: %d" % (pkts_num, (expected_wm * cell_size), buffer_pool_wm) + assert(expected_wm == total_shared) + assert(expected_wm * cell_size <= buffer_pool_wm) + assert(buffer_pool_wm <= (expected_wm + margin) * cell_size) + + finally: + if asic_type == 'mellanox': + # Release port + sched_prof_id = sai_thrift_create_scheduler_profile(self.client,RELEASE_PORT_MAX_RATE) + attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id],attr) + else: + # Resume egress of dut xmit port + attr_value = sai_thrift_attribute_value_t(booldata=1) + attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PKT_TX_ENABLE, value=attr_value) + self.client.sai_thrift_set_port_attribute(port_list[dst_port_id], attr) diff --git a/ansible/roles/test/files/saitests/switch.py b/ansible/roles/test/files/saitests/switch.py index a5fb00a8989..e81cb616795 100644 --- a/ansible/roles/test/files/saitests/switch.py +++ b/ansible/roles/test/files/saitests/switch.py @@ -70,6 +70,8 @@ def switch_init(client): else: print "unknown switch attribute" + # TOFIX in brcm sai: This causes the following error on td2 (a7050-qx-32s) + # ERR syncd: brcm_sai_set_switch_attribute:842 updating switch mac addr failed with error -2. 
attr_value = sai_thrift_attribute_value_t(mac='00:77:66:55:44:33') attr = sai_thrift_attribute_t(id=SAI_SWITCH_ATTR_SRC_MAC_ADDRESS, value=attr_value) client.sai_thrift_set_switch_attribute(attr) @@ -85,7 +87,7 @@ def switch_init(client): for interface,front in interface_to_front_mapping.iteritems(): sai_port_id = client.sai_thrift_get_port_id_by_front_port(front); port_list[int(interface)]=sai_port_id - + switch_inited = 1 @@ -544,7 +546,7 @@ def sai_thrift_create_scheduler_profile(client, max_rate, algorithm=0): value=attribute_value) scheduler_attr_list.append(attribute) attribute_value = sai_thrift_attribute_value_t(s32=algorithm) - attribute = sai_thrift_attribute_t(id=SAI_SCHEDULER_ATTR_SCHEDULING_ALGORITHM , + attribute = sai_thrift_attribute_t(id=SAI_SCHEDULER_ATTR_SCHEDULING_TYPE, value=attribute_value) scheduler_attr_list.append(attribute) scheduler_profile_id = client.sai_thrift_create_scheduler_profile(scheduler_attr_list) @@ -618,7 +620,7 @@ def sai_thrift_clear_all_counters(client): def sai_thrift_read_port_counters(client,port): port_cnt_ids=[] port_cnt_ids.append(SAI_PORT_STAT_IF_OUT_DISCARDS) - port_cnt_ids.append(SAI_PORT_STAT_ETHER_STATS_DROP_EVENTS) + port_cnt_ids.append(SAI_PORT_STAT_IF_IN_DISCARDS) port_cnt_ids.append(SAI_PORT_STAT_PFC_0_TX_PKTS) port_cnt_ids.append(SAI_PORT_STAT_PFC_1_TX_PKTS) port_cnt_ids.append(SAI_PORT_STAT_PFC_2_TX_PKTS) @@ -631,6 +633,7 @@ def sai_thrift_read_port_counters(client,port): port_cnt_ids.append(SAI_PORT_STAT_IF_OUT_UCAST_PKTS) counters_results=[] counters_results = client.sai_thrift_get_port_stats(port,port_cnt_ids,len(port_cnt_ids)) + queue_list=[] port_attr_list = client.sai_thrift_get_port_attribute(port) attr_list = port_attr_list.attr_list @@ -650,6 +653,76 @@ def sai_thrift_read_port_counters(client,port): queue1+=1 return (counters_results, queue_counters_results) +def sai_thrift_read_port_watermarks(client,port): + q_wm_ids=[] + q_wm_ids.append(SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES) + + pg_wm_ids=[] + 
pg_wm_ids.append(SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES) + pg_wm_ids.append(SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES) + + queue_list=[] + pg_list=[] + port_attr_list = client.sai_thrift_get_port_attribute(port) + attr_list = port_attr_list.attr_list + for attribute in attr_list: + if attribute.id == SAI_PORT_ATTR_QOS_QUEUE_LIST: + for queue_id in attribute.value.objlist.object_id_list: + queue_list.append(queue_id) + elif attribute.id == SAI_PORT_ATTR_INGRESS_PRIORITY_GROUP_LIST: + for pg_id in attribute.value.objlist.object_id_list: + pg_list.append(pg_id) + + thrift_results=[] + queue_res=[] + pg_shared_res=[] + pg_headroom_res=[] + + # Only use the first 8 queues (unicast) - multicast queues are not used + for queue in queue_list[:8]: + thrift_results=client.sai_thrift_get_queue_stats(queue,q_wm_ids,len(q_wm_ids)) + queue_res.append(thrift_results[0]) + + for pg in pg_list: + thrift_results=client.sai_thrift_get_pg_stats(pg,pg_wm_ids,len(pg_wm_ids)) + pg_headroom_res.append(thrift_results[0]) + pg_shared_res.append(thrift_results[1]) + + return (queue_res, pg_shared_res, pg_headroom_res) + +def sai_thrift_read_pg_counters(client, port_id): + pg_cntr_ids=[ + SAI_INGRESS_PRIORITY_GROUP_STAT_PACKETS + ] + + # fetch pg ids under port id + pg_ids = [] + port_attrs = client.sai_thrift_get_port_attribute(port_id) + attrs = port_attrs.attr_list + for attr in attrs: + if attr.id == SAI_PORT_ATTR_INGRESS_PRIORITY_GROUP_LIST: + for pg_id in attr.value.objlist.object_id_list: + pg_ids.append(pg_id) + + # get counter values of counter ids of interest under each pg + pg_cntrs=[] + for pg_id in pg_ids: + cntr_vals = client.sai_thrift_get_pg_stats(pg_id, pg_cntr_ids, len(pg_cntr_ids)) + pg_cntrs.append(cntr_vals[0]) + + return pg_cntrs + +def sai_thrift_read_buffer_pool_watermark(client, buffer_pool_id): + buffer_pool_wm_ids = [ + SAI_BUFFER_POOL_STAT_WATERMARK_BYTES + ] + + wm_vals = client.sai_thrift_get_buffer_pool_stats(buffer_pool_id, 
buffer_pool_wm_ids) + if not wm_vals: + print >> sys.stderr, "sai_thrift_read_buffer_pool_watermark returns empty list" + return None + return wm_vals[0] + def sai_thrift_create_vlan_member(client, vlan_id, port_id, tagging_mode): vlan_member_attr_list = [] attribute_value = sai_thrift_attribute_value_t(s32=vlan_id) diff --git a/ansible/roles/test/tasks/qos_get_max_buff_size.yml b/ansible/roles/test/tasks/qos_get_max_buff_size.yml new file mode 100644 index 00000000000..694aac6c300 --- /dev/null +++ b/ansible/roles/test/tasks/qos_get_max_buff_size.yml @@ -0,0 +1,146 @@ +# Get the mmu buffer config parameters of the target port from the DUT +# to generate the buffer profile for testing and/or +# to calculate the max number of packets the mmu can hold in the single-port sending case that +# only the target port is sending packets + +# TODO: May be better suited as an ansible module + +- debug: + msg="Get {{target_port_name}} port {{target_buffer_profile_type}} MAX buffer size" + +- name: Get {{target_buffer_profile_type}} buffer profile table for {{target_port_name}} port + shell: redis-cli -n 4 KEYS "{{target_table}}|{{target_port_name}}|{{target_pg}}" + register: buffer_profile_table + + +- fail: + msg: "Unable to get {{target_buffer_profile_type}} buffer profile table for {{target_port_name}}" + when: buffer_profile_table.stdout == "" + +- name: Get {{target_buffer_profile_type}} buffer profile for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile_table.stdout}}" profile + register: buffer_profile + + +- fail: + msg: "Unable to get {{target_buffer_profile_type}} buffer profile for {{target_port_name}}" + when: buffer_profile.stdout == "" + +- name: Parse buffer profile name + set_fact: + buffer_profile="{{buffer_profile.stdout|replace('[','')|replace(']','')}}" + + +- name: Get {{target_buffer_profile_type}} buffer headroom size for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" size + register: buffer_headroom + 
+- fail: + msg: "Unable to get headroom size for {{target_port_name}}" + when: buffer_headroom.stdout == "" + + +- name: Get {{target_buffer_profile_type}} buffer pool profile for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" pool + register: buffer_pool_id + +- name: Parse {{target_buffer_profile_type}} buffer pool profile name + set_fact: + buffer_pool_id="{{buffer_pool_id.stdout|replace('[','')|replace(']','')}}" + +- name: Get {{target_buffer_profile_type}} buffer alpha ID for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" dynamic_th + register: buffer_alpha_raw + +# static threshold +- block: + - debug: + msg: > + "Unable to get {{target_buffer_profile_type}} alpha for {{target_port_name}}\n" + "{{target_buffer_profile_type}} buffer uses static threshold" + + - name: Get {{target_buffer_profile_type}} buffer alpha ID for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" static_th + register: buffer_static_th + + - fail: + msg: "Unable to get {{target_buffer_profile_type}} static threshold for {{target_port_name}}" + when: buffer_static_th.stdout == "" + + - set_fact: + buffer_max_size: "{{buffer_static_th.stdout|int}}" + when: buffer_alpha_raw.stdout == "" + +# dynamic threshold +- block: + - name: Calculate the {{target_buffer_profile_type}} alpha + set_fact: + buffer_alpha="{{2|pow(buffer_alpha_raw.stdout|int)}}" + + - name: Get {{target_buffer_profile_type}} buffer pool size for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_pool_id}}" size + register: buffer_pool_size + + - fail: + msg: "Unable to get {{target_buffer_profile_type}} buffer pool size for {{target_port_name}}" + when: buffer_pool_size.stdout == "" + + - name: Calculate MAX buffer size for {{target_port_name}} port + set_fact: + buffer_max_size="{{buffer_headroom.stdout|int + ((buffer_alpha|float / (buffer_alpha|float + 1)) * buffer_pool_size.stdout|int)}}" + when: buffer_alpha_raw.stdout != 
"" + + +# ingress lossless specific +- name: Get XON for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" xon + register: buffer_xon + when: buffer_profile != "" and "pg_lossless" in buffer_profile + +- fail: + msg: "Unable to get XON for {{target_port_name}}" + when: "'pg_lossless' in buffer_profile and buffer_xon.stdout == ''" + + +# ingress lossless specific +- name: Get XOFF for {{target_port_name}} port + shell: redis-cli -n 4 HGET "{{buffer_profile}}" xoff + register: buffer_xoff + when: buffer_profile != "" and 'pg_lossless' in buffer_profile + +- fail: + msg: "Unable to get XOFF for {{target_port_name}}" + when: "'pg_lossless' in buffer_profile and buffer_xoff.stdout == ''" + + +# Get buffer pool ROID +# This is perhaps the only useful section in this yaml play +- set_fact: + buffer_pool_name="{{buffer_pool_id|replace('BUFFER_POOL|','')}}" + +- name: Get {{buffer_pool_name}} VOID + shell: redis-cli -n 2 HGET COUNTERS_BUFFER_POOL_NAME_MAP "{{buffer_pool_name}}" + register: buffer_pool_void + +- fail: + msg: "Unable to get VOID for {{buffer_pool_name}}" + when: buffer_pool_void.stdout == "" + +- name: Parse buffer pool VOID + set_fact: + buffer_pool_void="{{buffer_pool_void.stdout}}" + +- name: Get {{buffer_pool_name}} ROID + shell: redis-cli -n 1 HGET VIDTORID "{{buffer_pool_void}}" + register: buffer_pool_roid + +- fail: + msg: "Unable to get ROID for {{buffer_pool_name}}" + when: buffer_pool_roid.stdout == "" + +- name: Parse buffer pool ROID, remove 'oid:' prefix + set_fact: + buffer_pool_roid="{{buffer_pool_roid.stdout|replace('oid:','')}}" + +- debug: + msg="{{buffer_pool_name}} roid {{buffer_pool_roid}}" diff --git a/ansible/roles/test/tasks/qos_get_ports.yml b/ansible/roles/test/tasks/qos_get_ports.yml new file mode 100644 index 00000000000..1da26daa384 --- /dev/null +++ b/ansible/roles/test/tasks/qos_get_ports.yml @@ -0,0 +1,343 @@ +- name: Init variables. 
+ set_fact: + ptf_interfaces: [] + dut_switch_ports: [] + ptf_lag_interfaces: [] + dut_switch_lag_members: [] + testing_ptf_interfaces: [] + +- name: Getting minigraph facts + minigraph_facts: host={{inventory_hostname}} + become: no + +# Index stored in ptf_interfaces list is the index +# to its corresponding connected dut port name in dut_switch_ports list +- name: Get PTF interfaces from map + set_fact: + ptf_interfaces: "{{ptf_interfaces + [item.split('@')[0]]}}" + with_lines: cat {{ptf_portmap}} + when: "'#' not in item" + +- name: Get switch ports from map + set_fact: + dut_switch_ports: "{{dut_switch_ports + [item.split('@')[1]]}}" + with_lines: cat {{ptf_portmap}} + when: "'#' not in item" + +- name: Print switch ports and PTF interfaces + debug: msg="ptf_interfaces={{ptf_interfaces}} dut_switch_ports={{dut_switch_ports}} total_ports={{dut_switch_ports|length}}" + + +- name: Set ptf LAG interfaces + set_fact: + ptf_lag_interfaces: "{{ptf_lag_interfaces + [ (item|replace(\"PortChannel\", \"\")|int / 4)|int ]}}" + with_items: "{{minigraph_portchannels.keys()}}" + +- name: Get switch LAG members + set_fact: + dut_switch_lag_members: "{{dut_switch_lag_members + item['members']}}" + with_items: "{{minigraph_portchannels.values()}}" + +- name: Print LAG members + debug: msg="ptf_lag_interfaces={{ptf_lag_interfaces}} dut_switch_lag_members={{dut_switch_lag_members}}" + + +- name: Init testing port count + set_fact: + testing_ports_count: 0 + testing_ports_id: [] + +- name: Find the not lag ports for testing + set_fact: + testing_ports_id: "{{testing_ports_id + [item]}}" + with_items: "{{ptf_interfaces}}" + when: + - dut_switch_ports[item|int] in minigraph_ports.keys() + - dut_switch_ports[item|int] not in dut_switch_lag_members + - item != '31' # Only for Mellanox testbed. 
The last port is used for up link from DUT switch + +# TODO: make port selection random +- name: Set DST port ID + set_fact: + dst_port_id: "{{testing_ports_id[0]}}" + +- name: Set DST port 2 ID + set_fact: + dst_port_2_id: "{{testing_ports_id[1]}}" + +- name: Set SRC port ID + set_fact: + src_port_id: "{{testing_ports_id[2]}}" + +- name: Set DST port 3 ID + set_fact: + dst_port_3_id: "{{testing_ports_id[3]}}" + + +- name: Get IPs for non-vlan testing ports + testing_port_ip_facts: + testing_ports_id: "{{testing_ports_id}}" + dut_switch_ports: "{{dut_switch_ports}}" + minigraph_bgp: "{{minigraph_bgp}}" + minigraph_neighbors: "{{minigraph_neighbors}}" + connection: local + +- debug: + var: testing_ports_ip + + +- name: Set unique MACs to PTF interfaces + script: roles/test/files/helpers/change_mac.sh + delegate_to: "{{ptf_host}}" + when: minigraph_vlans | length >0 + + +- set_fact: + vlan_members: "{{minigraph_vlans[minigraph_vlans.keys()[0]]['members']}}" + when: minigraph_vlans | length >0 + +- name: Generate IPs in VLAN range + get_ip_in_range: num="{{dut_switch_ports|length}}" prefix="{{minigraph_vlan_interfaces[0]['addr']}}/{{minigraph_vlan_interfaces[0]['prefixlen']}}" exclude_ips="{{minigraph_vlan_interfaces[0]['addr']}}" + become: no + connection: local + failed_when: False + when: minigraph_vlans | length > 0 + +- debug: + var: generated_ips + +- name: Assign IPs to vlan testing ports + set_fact: + testing_ports_ip: "{{testing_ports_ip | combine({item: generated_ips[item|int].split('/')[0]})}}" + when: + testing_ports_ip[item] is not defined and dut_switch_ports[item|int] in vlan_members + with_items: "{{testing_ports_id}}" + +- debug: + var: testing_ports_ip + + +- name: Set DST port 1 IP + set_fact: + dst_port_ip: "{{testing_ports_ip[dst_port_id]}}" + +- name: Set DST port 2 IP + set_fact: + dst_port_2_ip: "{{testing_ports_ip[dst_port_2_id]}}" + +- name: Set SRC port IP + set_fact: + src_port_ip: "{{testing_ports_ip[src_port_id]}}" + +- name: Set DST port 
3 IP
  set_fact:
    dst_port_3_ip: "{{testing_ports_ip[dst_port_3_id]}}"


# Get buffers size
# Each include queries CONFIG_DB (redis -n 4) for the buffer profile of the
# given port/PG and registers buffer_headroom, buffer_max_size, buffer_pool_id
# and buffer_pool_roid for the facts set below.
# Ingress lossless
- include: roles/test/tasks/qos_get_max_buff_size.yml
  vars:
    target_table: 'BUFFER_PG'
    target_port_name: "{{dut_switch_ports[src_port_id|int]}}"
    target_pg: '3-4'
    target_buffer_profile_type: 'ingress lossless'

- name: Set lossless MAX buffer size
  set_fact:
    lossless_buffer_max_size: "{{buffer_headroom.stdout|int}}"

- name: Set lossless ingress buffer pool ROID
  set_fact:
    lossless_ingr_buf_pool_roid: "{{buffer_pool_roid}}"


# Ingress lossy
# NOTE(fix): removed stray '***' wrappers around target_port_name here and in
# the two egress includes below; the literal asterisks became part of the
# BUFFER_PG/BUFFER_QUEUE redis key and the HGET lookups could never match.
- include: roles/test/tasks/qos_get_max_buff_size.yml
  vars:
    target_table: 'BUFFER_PG'
    target_port_name: "{{dut_switch_ports[src_port_id|int]}}"
    target_pg: '0'
    target_buffer_profile_type: 'ingress lossy'

- name: Set lossy MAX buffer size
  set_fact:
    lossy_buffer_max_size: "{{buffer_max_size}}"

- name: Set lossy headroom size
  set_fact:
    lossy_headroom_size: "{{buffer_headroom.stdout|int}}"

- name: Set lossy ingress buffer pool ROID
  set_fact:
    lossy_ingr_buf_pool_roid: "{{buffer_pool_roid}}"


# Egress lossless
- include: roles/test/tasks/qos_get_max_buff_size.yml
  vars:
    target_table: 'BUFFER_QUEUE'
    target_port_name: "{{dut_switch_ports[src_port_id|int]}}"
    target_pg: '3-4'
    target_buffer_profile_type: 'egress lossless'

- name: Set MAX queue size for {{dut_switch_ports[src_port_id|int]}}
  set_fact:
    lossless_queue_max_size: "{{buffer_max_size}}"

- name: Set lossless egress buffer pool ROID
  set_fact:
    lossless_egr_buf_pool_roid: "{{buffer_pool_roid}}"


# Egress lossy
- include: roles/test/tasks/qos_get_max_buff_size.yml
  vars:
    target_table: 'BUFFER_QUEUE'
    target_port_name: "{{dut_switch_ports[src_port_id|int]}}"
    target_pg: '0-2'
    target_buffer_profile_type: 'egress lossy'

- name: Set MAX queue size for {{dut_switch_ports[src_port_id|int]}}
  set_fact:
    lossy_queue_max_size: "{{buffer_max_size}}"

- name: Set 
lossy egress buffer pool ROID + set_fact: + lossy_egr_buf_pool_roid: "{{buffer_pool_roid}}" + + +# ECN/WRED +- block: + - name: Determine the target queue of the WRED profile + set_fact: + target_q_wred: '3' + + - debug: var=target_q_wred + +- name: Get the WRED profile key for "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 KEYS "QUEUE|{{dut_switch_ports[dst_port_id|int]}}|{{target_q_wred}}" + register: wred_profile_name + +- fail: + msg: "Unable to get the wred profile key for {{dut_switch_ports[dst_port_id|int]}}" + when: wred_profile_name.stdout == "" + +- name: Parse WRED profile key + set_fact: + wred_profile_name="{{wred_profile_name.stdout|replace('[','')|replace(']','')}}" + +- name: Get the WRED profile for "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 HGET "{{wred_profile_name}}" wred_profile + register: wred_profile + +- fail: + msg: "Unable to get the buffer profile for {{dut_switch_ports[dst_port_id|int]}}" + when: wred_profile.stdout == "" + +- name: Parse WRED profile name + set_fact: + wred_profile="{{wred_profile.stdout|replace('[','')|replace(']','')}}" + +- name: Get green_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}} + shell: redis-cli -n 4 HGET "{{wred_profile}}" green_max_threshold + register: green_max_threshold + +- fail: + msg: "Unable to get the green_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}}" + when: green_max_threshold.stdout == "" + +- name: Parse WRED green_max_threshold + set_fact: + green_max_threshold="{{green_max_threshold.stdout|int}}" + +- name: Get yellow_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}} + shell: redis-cli -n 4 HGET "{{wred_profile}}" yellow_max_threshold + register: yellow_max_threshold + +- fail: + msg: "Unable to get the yellow_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}}" + when: yellow_max_threshold.stdout == "" + +- name: Parse WRED 
yellow_max_threshold + set_fact: + yellow_max_threshold="{{yellow_max_threshold.stdout|int}}" + +- name: Get red_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}} + shell: redis-cli -n 4 HGET "{{wred_profile}}" red_max_threshold + register: red_max_threshold + +- fail: + msg: "Unable to get the red_max_threshold for {{dut_switch_ports[dst_port_id|int]}} from {{wred_profile}}" + when: red_max_threshold.stdout == "" + +- name: Parse WRED red_max_threshold + set_fact: + red_max_threshold="{{red_max_threshold.stdout|int}}" + + +# Get watermark polling status +- debug: + msg="Get watermark counter status before the test" + +- name: Get watermark polling status before the test + shell: redis-cli -n 4 HGET "FLEX_COUNTER_TABLE|QUEUE_WATERMARK" FLEX_COUNTER_STATUS + register: watermark_status + +- debug: + msg="Watermark polling status {{watermark_status.stdout}}" + + +# Get scheduler weight +- name: Set target lossy queue to query the lossy scheduler profile + set_fact: + target_q_sched: '0' + +- debug: var=target_q_sched + +- name: Get lossy scheduler profile for "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 HGET "QUEUE|{{dut_switch_ports[dst_port_id|int]}}|{{target_q_sched}}" scheduler + register: lossy_sched_profile + +- fail: + msg: "Unable to get the lossy scheduler profile for {{dut_switch_ports[dst_port_id|int]}}" + when: lossy_sched_profile.stdout == "" + +- name: Process lossy scheduler profile name + set_fact: + lossy_sched_profile="{{lossy_sched_profile.stdout|replace('[','')|replace(']','')}}" + +- name: Get lossy scheduler weight for "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 HGET "{{lossy_sched_profile}}" weight + register: lossy_sched_weight + +- fail: + msg: "Unable to get lossy scheduler weight for {{dut_switch_ports[dst_port_id|int]}}" + when: lossy_sched_weight.stdout == "" + + +- name: Set target lossless queue to query the lossless scheduler profile + set_fact: + target_q_sched: '3' + 
+- debug: var=target_q_sched + +- name: Get lossless scheduler profile "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 HGET "QUEUE|{{dut_switch_ports[dst_port_id|int]}}|{{target_q_sched}}" scheduler + register: lossless_sched_profile + +- fail: + msg: "Unable to get the lossless scheduler profile for {{dut_switch_ports[dst_port_id|int]}}" + when: lossless_sched_profile.stdout == "" + +- name: Process lossless scheduler profile name + set_fact: + lossless_sched_profile="{{lossless_sched_profile.stdout|replace('[','')|replace(']','')}}" + +- name: Get lossless scheduler weight for "{{dut_switch_ports[dst_port_id|int]}}" + shell: redis-cli -n 4 HGET "{{lossless_sched_profile}}" weight + register: lossless_sched_weight + +- fail: + msg: "Unable to get lossless scheduler weight for {{dut_switch_ports[dst_port_id|int]}}" + when: lossless_sched_weight.stdout == "" diff --git a/ansible/roles/test/tasks/qos_sai.yml b/ansible/roles/test/tasks/qos_sai.yml new file mode 100644 index 00000000000..c0e6fe42a69 --- /dev/null +++ b/ansible/roles/test/tasks/qos_sai.yml @@ -0,0 +1,604 @@ +# To run ecn test the host system where ptf container resides should have +# optimized sysctl parameter "net.core.rmem_max". 
Now it's set to 4194304 +# Also the NICs supposed to have maximum buffer size of RX queue +# See: ethtool -g +# ethtool -G p4p1 rx 8192 + +- include_vars: vars/qos.yml + +- block: + - name: Getting minigraph facts + minigraph_facts: host={{inventory_hostname}} + become: no + + - name: check if the device has configured qos parameters + fail: msg="device doesn't have configured qos parameters" + when: minigraph_hwsku is not defined or qos_params[minigraph_hwsku] is not defined + + - name: set qos parameters for the device + set_fact: qp={{qos_params[minigraph_hwsku]}} + + - name: Ensure LLDP Daemon stopped + become: yes + supervisorctl: state=stopped name={{item}} + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python + with_items: + - lldpd + - lldp-syncd + + - name: Disable bgpd + become: yes + lineinfile: dest=/etc/quagga/daemons + regexp=^bgpd=.*$ + line='bgpd=no' + notify: + - Restart Quagga Daemon + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i bgp python + + - meta: flush_handlers + + - block: + - name: Deploy script to DUT/syncd + copy: src=roles/test/files/mlnx/packets_aging.py dest=/root/packets_aging.py + + - name: Disable Mellanox packet aging + shell: python /root/packets_aging.py disable + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python + when: minigraph_hwsku is defined and minigraph_hwsku in mellanox_hwskus + + - name: copy ptf tests + copy: src=roles/test/files/ptftests dest=/root + delegate_to: "{{ptf_host}}" + + - name: copy sai tests + copy: src=roles/test/files/saitests dest=/root + delegate_to: "{{ptf_host}}" + + - name: copy portmap + copy: src={{ptf_portmap}} dest=/root + delegate_to: "{{ptf_host}}" + when: minigraph_hwsku is defined and + (minigraph_hwsku in mellanox_hwskus or minigraph_hwsku == 'Arista-7050-QX-32S' + or minigraph_hwsku == 'Arista-7060CX-32S-C32' or minigraph_hwsku == 'Celestica-DX010-C32' + or 
minigraph_hwsku == 'Arista-7260CX3-D108C8' or minigraph_hwsku == 'Force10-S6100') + + - name: Init PTF base test parameters + set_fact: + ptf_base_params: + - router_mac={% if testbed_type not in ['t0', 't0-64', 't0-116'] %}'{{ansible_Ethernet0['macaddress']}}'{% else %}''{% endif %} + - server='{{ansible_host}}' + - port_map_file='/root/{{ptf_portmap | basename}}' + - sonic_asic_type='{{sonic_asic_type}}' + + - name: Get ports info. + include: roles/test/tasks/qos_get_ports.yml + + # Unpause all paused port + - include: qos_sai_ptf.yml + vars: + test_name: release all paused ports + test_path: sai_qos_tests.ReleaseAllPorts + test_params: [] + + # Populate arps + - name: Check if DUT has ARP aging issue or not + command: arp -n + become: yes + register: arp_entries + + - debug: + var: arp_entries + + - include: qos_sai_ptf.yml + vars: + test_name: populate arp on all ports + test_path: sai_qos_tests.ARPpopulate + test_params: [] + when: testbed_type in ['t0', 't0-64', 't0-116'] or arp_entries.stdout.find('incomplete') == -1 + + - name: Manually add an ARP entry for dst port + command: ip neigh replace {{dst_port_ip}} lladdr 7c:fe:90:5e:6b:a6 dev {{dut_switch_ports[dst_port_id|int]}} + become: yes + when: testbed_type not in ['t0', 't0-64', 't0-116'] and arp_entries.stdout.find('incomplete') != -1 + + # XOFF limit + - include: qos_sai_ptf.yml + vars: + test_name: xoff limit ptf test dscp = {{qp.xoff_1.dscp}}, ecn = {{qp.xoff_1.ecn}} + test_path: sai_qos_tests.PFCtest + test_params: + - dscp='{{qp.xoff_1.dscp}}' + - ecn='{{qp.xoff_1.ecn}}' + - pg='{{qp.xoff_1.pg}}' + - buffer_max_size='{{lossless_buffer_max_size|int}}' + - queue_max_size='{{lossless_queue_max_size|int}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.xoff_1.pkts_num_leak_out}}' + - pkts_num_trig_pfc='{{qp.xoff_1.pkts_num_trig_pfc}}' + - 
pkts_num_trig_ingr_drp='{{qp.xoff_1.pkts_num_trig_ingr_drp}}' + + - include: qos_sai_ptf.yml + vars: + test_name: xoff limit ptf test dscp = {{qp.xoff_2.dscp}}, ecn = {{qp.xoff_2.ecn}} + test_path: sai_qos_tests.PFCtest + test_params: + - dscp='{{qp.xoff_2.dscp}}' + - ecn='{{qp.xoff_2.ecn}}' + - pg='{{qp.xoff_2.pg}}' + - buffer_max_size='{{lossless_buffer_max_size|int}}' + - queue_max_size='{{lossless_queue_max_size|int}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.xoff_2.pkts_num_leak_out}}' + - pkts_num_trig_pfc='{{qp.xoff_2.pkts_num_trig_pfc}}' + - pkts_num_trig_ingr_drp='{{qp.xoff_2.pkts_num_trig_ingr_drp}}' + + # XON limit + - include: qos_sai_ptf.yml + vars: + test_name: xon limit ptf test dscp = {{qp.xon_1.dscp}}, ecn = {{qp.xon_1.ecn}} + test_path: sai_qos_tests.PFCXonTest + test_params: + - dscp='{{qp.xon_1.dscp}}' + - ecn='{{qp.xon_1.ecn}}' + - pg='{{qp.xon_1.pg}}' + - buffer_max_size='{{lossless_buffer_max_size|int}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - dst_port_2_id='{{dst_port_2_id}}' + - dst_port_2_ip='{{dst_port_2_ip}}' + - dst_port_3_id='{{dst_port_3_id}}' + - dst_port_3_ip='{{dst_port_3_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.xon_1.pkts_num_leak_out}}' + - pkts_num_trig_pfc='{{qp.xon_1.pkts_num_trig_pfc}}' + - pkts_num_dismiss_pfc='{{qp.xon_1.pkts_num_dismiss_pfc}}' + + - include: qos_sai_ptf.yml + vars: + test_name: xon limit ptf test dscp = {{qp.xon_2.dscp}}, ecn = {{qp.xon_2.ecn}} + test_path: sai_qos_tests.PFCXonTest + test_params: + - dscp='{{qp.xon_2.dscp}}' + - ecn='{{qp.xon_2.ecn}}' + - pg='{{qp.xon_2.pg}}' + - buffer_max_size='{{lossless_buffer_max_size|int}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - dst_port_2_id='{{dst_port_2_id}}' + - dst_port_2_ip='{{dst_port_2_ip}}' + - 
dst_port_3_id='{{dst_port_3_id}}' + - dst_port_3_ip='{{dst_port_3_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.xon_2.pkts_num_leak_out}}' + - pkts_num_trig_pfc='{{qp.xon_2.pkts_num_trig_pfc}}' + - pkts_num_dismiss_pfc='{{qp.xon_2.pkts_num_dismiss_pfc}}' + + # Headroom pool size + - include: qos_sai_ptf.yml + vars: + test_name: headroom pool size ptf test ecn = {{qp.hdrm_pool_size.ecn}} + test_path: sai_qos_tests.HdrmPoolSizeTest + test_params: + - dscps={{qp.hdrm_pool_size.dscps}} + - ecn={{qp.hdrm_pool_size.ecn}} + - pgs={{qp.hdrm_pool_size.pgs}} + - src_port_ids={{qp.hdrm_pool_size.src_port_ids}} + - src_port_ips=[{% for pid in qp.hdrm_pool_size.src_port_ids %}{% if not loop.last %}'{{testing_ports_ip[pid|string]}}', {% else %}'{{testing_ports_ip[pid|string]}}'{% endif %}{% endfor %}] + - dst_port_id={{qp.hdrm_pool_size.dst_port_id}} + - dst_port_ip='{{testing_ports_ip[qp.hdrm_pool_size.dst_port_id|string]}}' + - pgs_num={{qp.hdrm_pool_size.pgs_num }} + - pkts_num_leak_out={{qp.hdrm_pool_size.pkts_num_leak_out}} + - pkts_num_trig_pfc={{qp.hdrm_pool_size.pkts_num_trig_pfc}} + - pkts_num_hdrm_full={{qp.hdrm_pool_size.pkts_num_hdrm_full}} + - pkts_num_hdrm_partial={{qp.hdrm_pool_size.pkts_num_hdrm_partial}} + when: minigraph_hwsku is defined and + (minigraph_hwsku == 'Arista-7060CX-32S-C32' or minigraph_hwsku == 'Celestica-DX010-C32' or minigraph_hwsku == 'Arista-7260CX3-D108C8' + or minigraph_hwsku == 'Force10-S6100') + + # Lossy queue + - include: qos_sai_ptf.yml + vars: + test_name: Lossy queue, shared buffer dynamic allocation. 
dscp = {{qp.lossy_queue_1.dscp}}, ecn = {{qp.lossy_queue_1.ecn}} + test_path: sai_qos_tests.LossyQueueTest + test_params: + - dscp='{{qp.lossy_queue_1.dscp}}' + - ecn='{{qp.lossy_queue_1.ecn}}' + - pg='{{qp.lossy_queue_1.pg}}' + - buffer_max_size='{{lossy_buffer_max_size|int}}' + - headroom_size='{{lossy_headroom_size}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - dst_port_2_id='{{dst_port_2_id}}' + - dst_port_2_ip='{{dst_port_2_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.lossy_queue_1.pkts_num_leak_out}}' + - pkts_num_trig_egr_drp='{{qp.lossy_queue_1.pkts_num_trig_egr_drp}}' + + # DSCP to queue mapping + - include: qos_sai_ptf.yml + vars: + test_name: dscp to queue mapping ptf test + test_path: sai_qos_tests.DscpMappingPB + test_params: + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + + # WRR test + - include: qos_sai_ptf.yml + vars: + test_name: DWRR + test_path: sai_qos_tests.WRRtest + test_params: + - ecn='{{qp.wrr.ecn}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - q0_num_of_pkts='{{qp.wrr.q0_num_of_pkts}}' + - q1_num_of_pkts='{{qp.wrr.q1_num_of_pkts}}' + - q2_num_of_pkts='{{qp.wrr.q2_num_of_pkts}}' + - q3_num_of_pkts='{{qp.wrr.q3_num_of_pkts}}' + - q4_num_of_pkts='{{qp.wrr.q4_num_of_pkts}}' + - q5_num_of_pkts='{{qp.wrr.q5_num_of_pkts}}' + - q6_num_of_pkts='{{qp.wrr.q6_num_of_pkts}}' + - limit='{{qp.wrr.limit}}' + - pkts_num_leak_out='{{qp.wrr.pkts_num_leak_out}}' + - debug: + var: out.stdout_lines + + # Clear all watermarks before each watermark test + # because of the clear on read polling mode + - name: Toggle watermark polling + shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' + + # PG shared watermark test + - include: qos_sai_ptf.yml + vars: + 
test_name: PG shared watermark test, lossless traffic + test_path: sai_qos_tests.PGSharedWatermarkTest + test_params: + - dscp='{{qp.wm_pg_shared_lossless.dscp}}' + - ecn='{{qp.wm_pg_shared_lossless.ecn}}' + - pg='{{qp.wm_pg_shared_lossless.pg}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.wm_pg_shared_lossless.pkts_num_leak_out}}' + - pkts_num_fill_min='{{qp.wm_pg_shared_lossless.pkts_num_fill_min}}' + - pkts_num_fill_shared='{{qp.wm_pg_shared_lossless.pkts_num_trig_pfc}}' + - cell_size='{{qp.wm_pg_shared_lossless.cell_size}}' + - debug: + var: out.stdout_lines + + # Clear all watermarks before each watermark test + # because of the clear on read polling mode + - name: Toggle watermark polling + shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' + + # PG shared watermark test + - include: qos_sai_ptf.yml + vars: + test_name: PG shared watermark test, lossy traffic + test_path: sai_qos_tests.PGSharedWatermarkTest + test_params: + - dscp='{{qp.wm_pg_shared_lossy.dscp}}' + - ecn='{{qp.wm_pg_shared_lossy.ecn}}' + - pg='{{qp.wm_pg_shared_lossy.pg}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.wm_pg_shared_lossy.pkts_num_leak_out}}' + - pkts_num_fill_min='{{qp.wm_pg_shared_lossy.pkts_num_fill_min}}' + - pkts_num_fill_shared='{{qp.wm_pg_shared_lossy.pkts_num_trig_egr_drp|int - 1}}' + - cell_size='{{qp.wm_pg_shared_lossy.cell_size}}' + - debug: + var: out.stdout_lines + + # Clear all watermarks before each watermark test + # because of the clear on read polling mode + - name: Toggle watermark polling + shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' + + # PG headroom watermark test + - include: qos_sai_ptf.yml + vars: + test_name: PG headroom watermark test + 
test_path: sai_qos_tests.PGHeadroomWatermarkTest + test_params: + - dscp='{{qp.wm_pg_headroom.dscp}}' + - ecn='{{qp.wm_pg_headroom.ecn}}' + - pg='{{qp.wm_pg_headroom.pg}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.wm_pg_headroom.pkts_num_leak_out}}' + - pkts_num_trig_pfc='{{qp.wm_pg_headroom.pkts_num_trig_pfc}}' + - pkts_num_trig_ingr_drp='{{qp.wm_pg_headroom.pkts_num_trig_ingr_drp}}' + - cell_size='{{qp.wm_pg_headroom.cell_size}}' + - debug: + var: out.stdout_lines + + # Clear all watermarks before each watermark test + # because of the clear on read polling mode + - name: Toggle watermark polling + shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' + + # Queue shared watermark test + - include: qos_sai_ptf.yml + vars: + test_name: Queue shared watermark test, lossless traffic + test_path: sai_qos_tests.QSharedWatermarkTest + test_params: + - dscp='{{qp.wm_q_shared_lossless.dscp}}' + - ecn='{{qp.wm_q_shared_lossless.ecn}}' + - queue='{{qp.wm_q_shared_lossless.queue}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - pkts_num_leak_out='{{qp.wm_q_shared_lossless.pkts_num_leak_out}}' + - pkts_num_fill_min='{{qp.wm_q_shared_lossless.pkts_num_fill_min}}' + - pkts_num_trig_drp='{{qp.wm_q_shared_lossless.pkts_num_trig_ingr_drp}}' + - cell_size='{{qp.wm_q_shared_lossless.cell_size}}' + - debug: + var: out.stdout_lines + + # Clear all watermarks before each watermark test + # because of the clear on read polling mode + - name: Toggle watermark polling + shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable' + + # Queue shared watermark test + - include: qos_sai_ptf.yml + vars: + test_name: Queue shared watermark test, lossy traffic + test_path: sai_qos_tests.QSharedWatermarkTest + test_params: + - 
dscp='{{qp.wm_q_shared_lossy.dscp}}'
        - ecn='{{qp.wm_q_shared_lossy.ecn}}'
        - queue='{{qp.wm_q_shared_lossy.queue}}'
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
        - pkts_num_leak_out='{{qp.wm_q_shared_lossy.pkts_num_leak_out}}'
        - pkts_num_fill_min='{{qp.wm_q_shared_lossy.pkts_num_fill_min}}'
        - pkts_num_trig_drp='{{qp.wm_q_shared_lossy.pkts_num_trig_egr_drp}}'
        - cell_size='{{qp.wm_q_shared_lossy.cell_size}}'
  - debug:
      var: out.stdout_lines

  # Clear all watermarks before each watermark test
  # because of the clear on read polling mode
  - name: Toggle watermark polling
    shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable'

  # buffer pool watermark test
  # NOTE(fix): hwsku literal corrected to 'Arista-7050-QX-32S' (was 'Qx-32S')
  # throughout this section; Jinja string comparison is case-sensitive, so the
  # lowercase form never matched and the intended platform skip never fired.
  # Casing now matches the literal used by the portmap copy task above.
  - include: qos_sai_ptf.yml
    vars:
      test_name: Ingress buffer pool watermark test, lossless traffic
      test_path: sai_qos_tests.BufferPoolWatermarkTest
      test_params:
        - dscp='{{qp.wm_buf_pool_lossless.dscp}}'
        - ecn='{{qp.wm_buf_pool_lossless.ecn}}'
        - pg='{{qp.wm_buf_pool_lossless.pg}}'
        - queue=''
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
        - pkts_num_leak_out='{{qp.wm_buf_pool_lossless.pkts_num_leak_out}}'
        - pkts_num_fill_min='{{qp.wm_buf_pool_lossless.pkts_num_fill_ingr_min}}'
        - pkts_num_fill_shared='{{qp.wm_buf_pool_lossless.pkts_num_trig_pfc}}'
        - cell_size='{{qp.wm_buf_pool_lossless.cell_size}}'
        - buf_pool_roid='{{lossless_ingr_buf_pool_roid}}'
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')
  - debug:
      var: out.stdout_lines
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')

  # Clear all watermarks before each watermark test
  # because of the clear on read polling mode
  - name: Toggle watermark polling
    shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable'

  # buffer pool watermark test
  - include: qos_sai_ptf.yml
    vars:
      test_name: Egress buffer pool watermark test, lossless traffic
      test_path: sai_qos_tests.BufferPoolWatermarkTest
      test_params:
        - dscp='{{qp.wm_buf_pool_lossless.dscp}}'
        - ecn='{{qp.wm_buf_pool_lossless.ecn}}'
        - pg=''
        - queue='{{qp.wm_buf_pool_lossless.queue}}'
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
        - pkts_num_leak_out='{{qp.wm_buf_pool_lossless.pkts_num_leak_out}}'
        - pkts_num_fill_min='{{qp.wm_buf_pool_lossless.pkts_num_fill_egr_min}}'
        - pkts_num_fill_shared='{{qp.wm_buf_pool_lossless.pkts_num_trig_ingr_drp|int - 1}}'
        - cell_size='{{qp.wm_buf_pool_lossless.cell_size}}'
        - buf_pool_roid='{{lossless_egr_buf_pool_roid}}'
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')
  - debug:
      var: out.stdout_lines
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')

  # Clear all watermarks before each watermark test
  # because of the clear on read polling mode
  - name: Toggle watermark polling
    shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable'

  # buffer pool watermark test
  - include: qos_sai_ptf.yml
    vars:
      test_name: Ingress buffer pool watermark test, lossy traffic
      test_path: sai_qos_tests.BufferPoolWatermarkTest
      test_params:
        - dscp='{{qp.wm_buf_pool_lossy.dscp}}'
        - ecn='{{qp.wm_buf_pool_lossy.ecn}}'
        - pg='{{qp.wm_buf_pool_lossy.pg}}'
        - queue=''
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
        - pkts_num_leak_out='{{qp.wm_buf_pool_lossy.pkts_num_leak_out}}'
        - pkts_num_fill_min='{{qp.wm_buf_pool_lossy.pkts_num_fill_ingr_min}}'
        - pkts_num_fill_shared='{{qp.wm_buf_pool_lossy.pkts_num_trig_egr_drp|int - 1}}'
        - cell_size='{{qp.wm_buf_pool_lossy.cell_size}}'
        - buf_pool_roid='{{lossy_ingr_buf_pool_roid}}'
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')
  - debug:
      var: out.stdout_lines
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')

  # Clear all watermarks before each watermark test
  # because of the clear on read polling mode
  - name: Toggle watermark polling
    shell: bash -c 'counterpoll watermark enable; sleep 20; counterpoll watermark disable'

  # buffer pool watermark test
  - include: qos_sai_ptf.yml
    vars:
      test_name: Egress buffer pool watermark test, lossy traffic
      test_path: sai_qos_tests.BufferPoolWatermarkTest
      test_params:
        - dscp='{{qp.wm_buf_pool_lossy.dscp}}'
        - ecn='{{qp.wm_buf_pool_lossy.ecn}}'
        - pg=''
        - queue='{{qp.wm_buf_pool_lossy.queue}}'
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
        - pkts_num_leak_out='{{qp.wm_buf_pool_lossy.pkts_num_leak_out}}'
        - pkts_num_fill_min='{{qp.wm_buf_pool_lossy.pkts_num_fill_egr_min}}'
        - pkts_num_fill_shared='{{qp.wm_buf_pool_lossy.pkts_num_trig_egr_drp|int - 1}}'
        - cell_size='{{qp.wm_buf_pool_lossy.cell_size}}'
        - buf_pool_roid='{{lossy_egr_buf_pool_roid}}'
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')
  - debug:
      var: out.stdout_lines
    when: minigraph_hwsku is defined and
          (minigraph_hwsku != 'Arista-7050-QX-32S')

  # DSCP to pg mapping
  - include: qos_sai_ptf.yml
    vars:
      test_name: dscp to pg mapping ptf test
      test_path: sai_qos_tests.DscpToPgMapping
      test_params:
        - dst_port_id='{{dst_port_id}}'
        - dst_port_ip='{{dst_port_ip}}'
        - src_port_id='{{src_port_id}}'
        - src_port_ip='{{src_port_ip}}'
  - debug:
      var: out.stdout_lines

  # Change lossy and lossless scheduler weights
  - name: Change lossy scheduler weight to {{qp.wrr_chg.lossy_weight}}
    command: redis-cli -n 4 HSET "{{lossy_sched_profile}}" weight 
{{qp.wrr_chg.lossy_weight}} + + - name: Change lossless scheduler weight to {{qp.wrr_chg.lossless_weight}} + command: redis-cli -n 4 HSET "{{lossless_sched_profile}}" weight {{qp.wrr_chg.lossless_weight}} + + # WRR test + - include: qos_sai_ptf.yml + vars: + test_name: DWRR runtime weight change + test_path: sai_qos_tests.WRRtest + test_params: + - ecn='{{qp.wrr_chg.ecn}}' + - dst_port_id='{{dst_port_id}}' + - dst_port_ip='{{dst_port_ip}}' + - src_port_id='{{src_port_id}}' + - src_port_ip='{{src_port_ip}}' + - q0_num_of_pkts='{{qp.wrr_chg.q0_num_of_pkts}}' + - q1_num_of_pkts='{{qp.wrr_chg.q1_num_of_pkts}}' + - q2_num_of_pkts='{{qp.wrr_chg.q2_num_of_pkts}}' + - q3_num_of_pkts='{{qp.wrr_chg.q3_num_of_pkts}}' + - q4_num_of_pkts='{{qp.wrr_chg.q4_num_of_pkts}}' + - q5_num_of_pkts='{{qp.wrr_chg.q5_num_of_pkts}}' + - q6_num_of_pkts='{{qp.wrr_chg.q6_num_of_pkts}}' + - limit='{{qp.wrr_chg.limit}}' + - pkts_num_leak_out='{{qp.wrr_chg.pkts_num_leak_out}}' + - debug: + var: out.stdout_lines + + # Restore lossy and lossless scheduler weights + - name: Restore lossy scheduler weight to {{lossy_sched_weight}} + command: redis-cli -n 4 HSET "{{lossy_sched_profile}}" weight "{{lossy_sched_weight.stdout}}" + + - name: Restore lossless scheduler weight to {{lossless_sched_weight}} + command: redis-cli -n 4 HSET "{{lossless_sched_profile}}" weight "{{lossless_sched_weight.stdout}}" + + always: + - name: Restore LLDP Daemon + become: yes + supervisorctl: state=started name={{item}} + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i lldp python + with_items: + - lldpd + - lldp-syncd + + - name: Enable bgpd + become: yes + lineinfile: dest=/etc/quagga/daemons + regexp=^bgpd=.*$ + line='bgpd=yes' + notify: + - Restart Quagga Daemon + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i bgp python + + - name: Restore original watermark polling status + shell: counterpoll watermark {{watermark_status.stdout}} + when: 
watermark_status.stdout == "enable" or watermark_status.stdout == "disable" + + - name: Restore lossy scheduler weight to {{lossy_sched_weight}} + command: redis-cli -n 4 HSET "{{lossy_sched_profile}}" weight "{{lossy_sched_weight.stdout}}" + + - name: Restore lossless scheduler weight to {{lossless_sched_weight}} + command: redis-cli -n 4 HSET "{{lossless_sched_profile}}" weight "{{lossless_sched_weight.stdout}}" + + - name: Enable Mellanox packet aging + shell: python /root/packets_aging.py enable + vars: + ansible_shell_type: docker + ansible_python_interpreter: docker exec -i syncd python + when: minigraph_hwsku is defined and minigraph_hwsku in mellanox_hwskus + + - meta: flush_handlers diff --git a/ansible/roles/test/tasks/qos_sai_ptf.yml b/ansible/roles/test/tasks/qos_sai_ptf.yml new file mode 100644 index 00000000000..2981755ba79 --- /dev/null +++ b/ansible/roles/test/tasks/qos_sai_ptf.yml @@ -0,0 +1,16 @@ +- name: Set parameters for specific test + set_fact: ptf_qos_params="{{ptf_base_params + test_params}}" + +- name: "{{test_name}}" + shell: ptf --test-dir saitests {{test_path}} --platform-dir ptftests --platform remote -t "{{ptf_qos_params|join(';')}}" --disable-ipv6 --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre {{extra_options | default("")}} 2>&1 + args: + chdir: /root + delegate_to: "{{ptf_host}}" + failed_when: False + register: out + +- debug: var=out.stdout_lines + when: out.rc != 0 + +- fail: msg="Failed test '{{test_name}}'" + when: out.rc != 0 diff --git a/ansible/roles/test/templates/qos_lossy_profile.j2 b/ansible/roles/test/templates/qos_lossy_profile.j2 new file mode 100644 index 00000000000..2b377863015 --- /dev/null +++ b/ansible/roles/test/templates/qos_lossy_profile.j2 @@ -0,0 +1,24 @@ +{ + {% if pfc_generate_buffer_profile == 'True' %} + "BUFFER_PROFILE": { + "pg_lossy_TEST_profile": { + "dynamic_th": "-8", + "pool": "[{{ buffer_pool_id }}]", + {% if buffer_headroom.stdout != '0' %} + "size": "{{ 
buffer_headroom.stdout }}" + {% else %} + "size": "15000" + {% endif %} + } + }, + {% endif %} + "BUFFER_PG": { + "{{ dut_switch_ports[src_port_id|int] }}|0-1": { + {% if pfc_generate_buffer_profile == 'True' %} + "profile": "[BUFFER_PROFILE|pg_lossy_TEST_profile]" + {% else %} + "profile": "[{{ buffer_profile }}]" + {% endif %} + } + } +} diff --git a/ansible/roles/test/templates/qos_pfc_profile.j2 b/ansible/roles/test/templates/qos_pfc_profile.j2 new file mode 100644 index 00000000000..9b0650ea5e3 --- /dev/null +++ b/ansible/roles/test/templates/qos_pfc_profile.j2 @@ -0,0 +1,22 @@ +{ + {% if pfc_generate_buffer_profile == 'True' %} + "BUFFER_PROFILE": { + "pg_lossless_PFC_TEST_profile": { + "xon": "{{ buffer_xon.stdout }}", + "dynamic_th": "-8", + "xoff": "{{ buffer_xoff.stdout }}", + "pool": "[{{ buffer_pool_id }}]", + "size": "{{ buffer_headroom.stdout }}" + } + }, + {% endif %} + "BUFFER_PG": { + "{{ dut_switch_ports[src_port_id|int] }}|3-4": { + {% if pfc_generate_buffer_profile == 'True' %} + "profile": "[BUFFER_PROFILE|pg_lossless_PFC_TEST_profile]" + {% else %} + "profile": "[{{ buffer_profile }}]" + {% endif %} + } + } +} diff --git a/ansible/roles/test/vars/testcases.yml b/ansible/roles/test/vars/testcases.yml index 2aba734fc89..b5ee093d546 100644 --- a/ansible/roles/test/vars/testcases.yml +++ b/ansible/roles/test/vars/testcases.yml @@ -192,6 +192,10 @@ testcases: qos: filename: qos.yml topologies: [ptf32, ptf64] + + qos_sai: + filename: qos_sai.yml + topologies: [ptf32, ptf64, t1, t1-lag, t0, t0-64, t0-116] reboot: filename: reboot.yml diff --git a/ansible/vars/qos.yml b/ansible/vars/qos.yml new file mode 100644 index 00000000000..201411e9cc3 --- /dev/null +++ b/ansible/vars/qos.yml @@ -0,0 +1,712 @@ +--- + +# TBD for ACS-MSN2700 xon_1, xon_2: +# Once the new fw version with the fix is burned, should change the +# xon_th_pkts to 10687 +# xoff_th_pkts to 0 +# since xon_th and xoff_th are configured to the same value. 
+# The current parameters are according to current fw behavior +# +# ecn: +# Dictate the ECN field in assembling a packet +# 0 - not-ECT; 1 - ECT(1); 2 - ECT(0); 3 - CE +# +# ecn_* profile is for ECN limit test, which is removed +# +# Arista-7260CX3-D108C8: +# xoff_1 for 50G +# xoff_2 for 100G +qos_params: + ACS-MSN2700: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + lossy_queue: + dscp: 8 + ecn: 1 + pg: 1 + wrr: + ecn: 1 + q0_num_of_pkts: 600 + q1_num_of_pkts: 400 + q3_num_of_pkts: 500 + q4_num_of_pkts: 500 + limit: 80 + Mellanox-SN2700: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 0 + pkts_num_trig_pfc: 11115 + pkts_num_trig_ingr_drp: 11213 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 0 + pkts_num_trig_pfc: 11115 + pkts_num_trig_ingr_drp: 11213 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 0 + pkts_num_trig_pfc: 11115 + pkts_num_dismiss_pfc: 10924 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 0 + pkts_num_trig_pfc: 11115 + pkts_num_dismiss_pfc: 10924 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + lossy_queue: + dscp: 8 + ecn: 1 + pg: 1 + pkts_num_leak_out: 0 + pkts_num_trig_egr_drp: 48547 + wrr: 
+ ecn: 1 + q0_num_of_pkts: 600 + q1_num_of_pkts: 400 + q3_num_of_pkts: 500 + q4_num_of_pkts: 500 + limit: 80 + pkts_num_leak_out: 0 + ACS-MSN2740: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + lossy_queue: + dscp: 8 + ecn: 1 + pg: 1 + wrr: + ecn: 1 + q0_num_of_pkts: 600 + q1_num_of_pkts: 400 + q3_num_of_pkts: 500 + q4_num_of_pkts: 500 + limit: 80 + Arista-7050-QX-32S: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 48 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 48 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 48 + pkts_num_trig_pfc: 4898 + pkts_num_dismiss_pfc: 12 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 48 + pkts_num_trig_pfc: 4898 + pkts_num_dismiss_pfc: 12 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 1 + pkts_num_leak_out: 48 + pkts_num_trig_egr_drp: 31322 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 
+ pkts_num_leak_out: 48 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 48 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 4898 + cell_size: 208 + wm_pg_shared_lossy: + dscp: 1 + ecn: 1 + pg: 0 + pkts_num_leak_out: 48 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 31322 + cell_size: 208 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 48 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_leak_out: 48 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_q_shared_lossy: + dscp: 1 + ecn: 1 + queue: 1 + pkts_num_leak_out: 48 + pkts_num_fill_min: 8 + pkts_num_trig_egr_drp: 31322 + cell_size: 208 + Force10-S6100: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 19 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 1979 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 19 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 1979 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 19 + pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 19 + pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 1 + pkts_num_leak_out: 19 + pkts_num_trig_egr_drp: 9887 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + pkts_num_leak_out: 19 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: 
[25, 26, 27, 40, 41] + dst_port_id: 24 + pgs_num: 10 + pkts_num_leak_out: 19 + pkts_num_trig_pfc: 732 + pkts_num_hdrm_full: 520 + pkts_num_hdrm_partial: 361 + Arista-7060CX-32S-C32: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_leak_out: 36 + pkts_num_trig_egr_drp: 9887 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + pkts_num_leak_out: 36 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [17, 18] + dst_port_id: 16 + pgs_num: 4 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1095 + pkts_num_hdrm_full: 1292 + pkts_num_hdrm_partial: 1165 + Celestica-DX010-C32: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 36 + 
pkts_num_trig_pfc: 1458 + pkts_num_dismiss_pfc: 11 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_leak_out: 36 + pkts_num_trig_egr_drp: 9887 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + pkts_num_leak_out: 36 + wrr_chg: + ecn: 1 + q0_num_of_pkts: 80 + q1_num_of_pkts: 80 + q2_num_of_pkts: 80 + q3_num_of_pkts: 300 + q4_num_of_pkts: 300 + q5_num_of_pkts: 80 + q6_num_of_pkts: 80 + limit: 80 + pkts_num_leak_out: 36 + lossy_weight: 8 + lossless_weight: 30 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [17, 18] + dst_port_id: 16 + pgs_num: 4 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1095 + pkts_num_hdrm_full: 1292 + pkts_num_hdrm_partial: 1165 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 1458 + cell_size: 208 + wm_pg_shared_lossy: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_leak_out: 36 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 9887 + cell_size: 208 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 36 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_leak_out: 36 + pkts_num_fill_min: 8 + pkts_num_trig_ingr_drp: 2751 + cell_size: 208 + wm_q_shared_lossy: + dscp: 8 + ecn: 1 + queue: 0 + pkts_num_leak_out: 36 + pkts_num_fill_min: 8 + pkts_num_trig_egr_drp: 9887 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + 
pkts_num_leak_out: 36 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 1458 + pkts_num_trig_ingr_drp: 2751 + pkts_num_fill_egr_min: 8 + cell_size: 208 + wm_buf_pool_lossy: + dscp: 8 + ecn: 1 + pg: 0 + queue: 0 + pkts_num_leak_out: 36 + pkts_num_fill_ingr_min: 0 + pkts_num_trig_egr_drp: 9887 + pkts_num_fill_egr_min: 8 + cell_size: 208 + Arista-7260CX3-D108C8: + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 22 + pkts_num_trig_pfc: 4386 + pkts_num_trig_ingr_drp: 4657 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 31 + pkts_num_trig_pfc: 4386 + pkts_num_trig_ingr_drp: 4853 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_leak_out: 22 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_leak_out: 22 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue: + dscp: 8 + ecn: 1 + pg: 1 + pkts_num_leak_out: 22 + pkts_num_trig_egr_drp: 10522 + wrr: + ecn: 1 + q0_num_of_pkts: 600 + q1_num_of_pkts: 400 + q3_num_of_pkts: 500 + q4_num_of_pkts: 500 + limit: 80 + pkts_num_leak_out: 22