diff --git a/ansible/roles/test/files/mlnx/packets_aging.py b/ansible/roles/test/files/mlnx/packets_aging.py deleted file mode 100755 index 584962a3d26..00000000000 --- a/ansible/roles/test/files/mlnx/packets_aging.py +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env python -''' -This file contains Python script to enable/disable packets aging in queues(buffers?). -''' - -import sys, errno -import os -import argparse -from python_sdk_api.sx_api import (SX_STATUS_SUCCESS, - sx_api_open, - sx_api_port_device_get, - sx_api_port_sll_set, - sx_api_port_sll_get, - sx_api_port_hll_set, - new_sx_port_attributes_t_arr, - new_uint32_t_p, - uint32_t_p_assign, - uint32_t_p_value, - sx_port_attributes_t_arr_getitem) - -parser = argparse.ArgumentParser(description='Toggle Mellanox-specific packet aging on egress queues') -parser.add_argument('command', choices=['enable', 'disable'], type=str, help='Enable/Disable packet aging') -args = parser.parse_args() - -# Open SDK -rc, handle = sx_api_open(None) -if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "Failed to open api handle.\nPlease check that SDK is running." - sys.exit(errno.EACCES) - -# Get list of ports -port_attributes_list = new_sx_port_attributes_t_arr(64) -port_cnt_p = new_uint32_t_p() -uint32_t_p_assign(port_cnt_p, 64) - -rc = sx_api_port_device_get(handle, 1 , 0, port_attributes_list, port_cnt_p) -if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "An error returned by sx_api_port_device_get." - sys.exit() -port_cnt = uint32_t_p_value(port_cnt_p) - -set_mode = False -if args.command == "enable": # enable packets aging - sll_time = 0x418937 - hll_time = 0x83127 - hll_stall = 7 - set_mode = True -else: - assert args.command == "disable" # disable packets aging - sll_time = 0xffffffffffffffff - hll_time = 0xffffffff - hll_stall = 0 - set_mode = True - -if set_mode: - rc = sx_api_port_sll_set(handle, sll_time) - if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "An error returned by sx_api_port_sll_set." 
- sys.exit() -else: - sll_p = new_uint64_t_p() - rc = sx_api_port_sll_get(handle, sll_p) - if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "An error returned by sx_api_port_sll_get." - sys.exit() - else: - sll = uint64_t_p_value(sll_p) - print >> sys.stderr, ("sll_max_time=0x%X" % sll) - -for i in range(0, port_cnt): - port_attributes = sx_port_attributes_t_arr_getitem(port_attributes_list,i) - log_port = int(port_attributes.log_port) - if log_port < 0xFFFFF: # only physical ports - if set_mode: - rc = sx_api_port_hll_set(handle, log_port, hll_time, hll_stall) - if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "An error returned by sx_api_port_hll_set." - sys.exit() - else: - hll_max_time_p = new_uint32_t_p() - hll_stall_cnt_p = new_uint32_t_p() - rc = sx_api_port_hll_get(handle,log_port, hll_max_time_p, hll_stall_cnt_p) - if (rc != SX_STATUS_SUCCESS): - print >> sys.stderr, "An error returned by sx_api_port_hll_set." - sys.exit() - else: - hll_max_time = uint32_t_p_value(hll_max_time_p) - hll_stall_cnt = uint32_t_p_value(hll_stall_cnt_p) - print >> sys.stderr, ("Port%d(Ethernet%d, logical:0x%X) hll_time:0x%X, hll_stall:0x%X" % - (port_attributes.port_mapping.module_port, (port_attributes.port_mapping.module_port * 4), - log_port, hll_max_time, hll_stall_cnt)) diff --git a/ansible/roles/test/files/mlnx/packets_aging.py b/ansible/roles/test/files/mlnx/packets_aging.py new file mode 120000 index 00000000000..0ff5445a6da --- /dev/null +++ b/ansible/roles/test/files/mlnx/packets_aging.py @@ -0,0 +1 @@ +../../../../../tests/qos/files/mellanox/packets_aging.py \ No newline at end of file diff --git a/ansible/roles/test/files/saitests b/ansible/roles/test/files/saitests new file mode 120000 index 00000000000..5db6e885a73 --- /dev/null +++ b/ansible/roles/test/files/saitests @@ -0,0 +1 @@ +../../../../tests/saitests/ \ No newline at end of file diff --git a/ansible/vars/qos.yml b/ansible/vars/qos.yml deleted file mode 100644 index 32362506fcd..00000000000 --- 
a/ansible/vars/qos.yml +++ /dev/null @@ -1,728 +0,0 @@ ---- - -# TBD for ACS-MSN2700 xon_1, xon_2: -# Once the new fw version with the fix is burned, should change the -# xon_th_pkts to 10687 -# xoff_th_pkts to 0 -# since xon_th and xoff_th are configured to the same value. -# The current parameters are according to current fw behavior -# -# ecn: -# Dictate the ECN field in assembling a packet -# 0 - not-ECT; 1 - ECT(1); 2 - ECT(0); 3 - CE -# -# ecn_* profile is for ECN limit test, which is removed -# -# Arista-7260CX3-D108C8: -# xoff_1 for 50G -# xoff_2 for 100G -qos_params: - spc1: - 40000_5m: - pkts_num_leak_out: 0 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22115 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22115 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22115 - cell_size: 96 - packet_size: 300 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 22115 - cell_size: 96 - 40000_40m: - pkts_num_leak_out: 0 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22190 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22190 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 22038 - pkts_num_trig_ingr_drp: 22190 - cell_size: 96 - packet_size: 300 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 22190 - cell_size: 96 - xon_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 22038 - pkts_num_dismiss_pfc: 21847 - xon_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 22038 - pkts_num_dismiss_pfc: 21847 - ecn_1: - dscp: 8 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 96 - ecn_2: - dscp: 8 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 96 - ecn_3: - dscp: 0 - ecn: 0 - num_of_pkts: 
5000 - limit: 182000 - min_limit: 180000 - cell_size: 96 - ecn_4: - dscp: 0 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 96 - lossy_queue_1: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_trig_egr_drp: 67965 - wm_pg_shared_lossless: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_fill_min: 6 - pkts_num_trig_pfc: 22038 - cell_size: 96 - packet_size: 300 - wm_pg_shared_lossy: - dscp: 1 - ecn: 1 - pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 67965 - cell_size: 96 - packet_size: 300 - wm_q_shared_lossy: - dscp: 1 - ecn: 1 - queue: 1 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 67965 - cell_size: 96 - wrr: - ecn: 1 - q0_num_of_pkts: 600 - q1_num_of_pkts: 400 - q3_num_of_pkts: 500 - q4_num_of_pkts: 500 - limit: 80 - td2: - 40000_5m: - pkts_num_leak_out: 48 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 5164 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - pkts_num_fill_egr_min: 0 - cell_size: 208 - 40000_300m: - pkts_num_leak_out: 48 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4898 - pkts_num_trig_ingr_drp: 5164 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 5164 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 4898 - 
pkts_num_trig_ingr_drp: 5164 - pkts_num_fill_egr_min: 0 - cell_size: 208 - xon_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4898 - pkts_num_dismiss_pfc: 12 - xon_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4898 - pkts_num_dismiss_pfc: 12 - ecn_1: - dscp: 8 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 208 - ecn_2: - dscp: 8 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - ecn_3: - dscp: 0 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 208 - ecn_4: - dscp: 0 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - lossy_queue_1: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_trig_egr_drp: 31322 - wrr: - ecn: 1 - q0_num_of_pkts: 140 - q1_num_of_pkts: 140 - q2_num_of_pkts: 140 - q3_num_of_pkts: 150 - q4_num_of_pkts: 150 - q5_num_of_pkts: 140 - q6_num_of_pkts: 140 - limit: 80 - wrr_chg: - ecn: 1 - q0_num_of_pkts: 80 - q1_num_of_pkts: 80 - q2_num_of_pkts: 80 - q3_num_of_pkts: 300 - q4_num_of_pkts: 300 - q5_num_of_pkts: 80 - q6_num_of_pkts: 80 - limit: 80 - lossy_weight: 8 - lossless_weight: 30 - wm_pg_shared_lossless: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_fill_min: 6 - pkts_num_trig_pfc: 4898 - packet_size: 64 - cell_size: 208 - wm_pg_shared_lossy: - dscp: 1 - ecn: 1 - pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 31322 - packet_size: 64 - cell_size: 208 - wm_q_shared_lossy: - dscp: 1 - ecn: 1 - queue: 1 - pkts_num_fill_min: 8 - pkts_num_trig_egr_drp: 31322 - cell_size: 208 - wm_buf_pool_lossy: - dscp: 8 - ecn: 1 - pg: 0 - queue: 0 - pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 31322 - pkts_num_fill_egr_min: 8 - cell_size: 208 - th: - 40000_300m: - pkts_num_leak_out: 19 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7063 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7063 - hdrm_pool_size: - dscps: [3, 4] - ecn: 1 - pgs: [3, 4] - src_port_ids: [25, 26, 27, 
40, 41] - dst_port_id: 24 - pgs_num: 10 - pkts_num_trig_pfc: 1194 - pkts_num_hdrm_full: 520 - pkts_num_hdrm_partial: 361 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7063 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 8 - pkts_num_trig_ingr_drp: 7063 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7063 - pkts_num_fill_egr_min: 8 - cell_size: 208 - 100000_300m: - pkts_num_leak_out: 36 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7835 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7835 - hdrm_pool_size: - dscps: [3, 4] - ecn: 1 - pgs: [3, 4] - src_port_ids: [17, 18] - dst_port_id: 16 - pgs_num: 4 - pkts_num_trig_pfc: 2620 - pkts_num_hdrm_full: 1292 - pkts_num_hdrm_partial: 1165 - wm_pg_shared_lossless: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_fill_min: 6 - pkts_num_trig_pfc: 6542 - cell_size: 208 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7835 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 8 - pkts_num_trig_ingr_drp: 7835 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 6542 - pkts_num_trig_ingr_drp: 7835 - pkts_num_fill_egr_min: 8 - cell_size: 208 - xon_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 6542 - pkts_num_dismiss_pfc: 11 - xon_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 6542 - pkts_num_dismiss_pfc: 11 - ecn_1: - dscp: 8 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 208 - ecn_2: - dscp: 8 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - ecn_3: - dscp: 0 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - 
cell_size: 208 - ecn_4: - dscp: 0 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - lossy_queue_1: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_trig_egr_drp: 9887 - wrr: - ecn: 1 - q0_num_of_pkts: 140 - q1_num_of_pkts: 140 - q2_num_of_pkts: 140 - q3_num_of_pkts: 150 - q4_num_of_pkts: 150 - q5_num_of_pkts: 140 - q6_num_of_pkts: 140 - limit: 80 - wrr_chg: - ecn: 1 - q0_num_of_pkts: 80 - q1_num_of_pkts: 80 - q2_num_of_pkts: 80 - q3_num_of_pkts: 300 - q4_num_of_pkts: 300 - q5_num_of_pkts: 80 - q6_num_of_pkts: 80 - limit: 80 - lossy_weight: 8 - lossless_weight: 30 - wm_pg_shared_lossless: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_fill_min: 6 - pkts_num_trig_pfc: 6542 - packet_size: 64 - cell_size: 208 - wm_pg_shared_lossy: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 9887 - packet_size: 64 - cell_size: 208 - wm_q_shared_lossy: - dscp: 8 - ecn: 1 - queue: 0 - pkts_num_fill_min: 8 - pkts_num_trig_egr_drp: 9887 - cell_size: 208 - wm_buf_pool_lossy: - dscp: 8 - ecn: 1 - pg: 0 - queue: 0 - pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 9887 - pkts_num_fill_egr_min: 8 - cell_size: 208 - th2: - 40000_300m: - pkts_num_leak_out: 0 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 4978 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 4978 - hdrm_pool_size: - dscps: [3, 4] - ecn: 1 - pgs: [3, 4] - src_port_ids: [6, 7, 8, 9, 10, 38, 39, 40, 41, 42] - dst_port_id: 32 - pgs_num: 19 - pkts_num_trig_pfc: 1490 - pkts_num_hdrm_full: 520 - pkts_num_hdrm_partial: 47 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 4978 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 4978 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 4978 - 
pkts_num_fill_egr_min: 16 - cell_size: 208 - 50000_300m: - pkts_num_leak_out: 0 - xoff_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 5140 - xoff_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 5140 - hdrm_pool_size: - dscps: [3, 4] - ecn: 1 - pgs: [3, 4] - src_port_ids: [1, 2, 3, 4, 5, 6, 7] - dst_port_id: 0 - pgs_num: 14 - pkts_num_trig_pfc: 1826 - pkts_num_hdrm_full: 682 - pkts_num_hdrm_partial: 542 - wm_pg_headroom: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 5140 - cell_size: 208 - wm_q_shared_lossless: - dscp: 3 - ecn: 1 - queue: 3 - pkts_num_fill_min: 0 - pkts_num_trig_ingr_drp: 5140 - cell_size: 208 - wm_buf_pool_lossless: - dscp: 3 - ecn: 1 - pg: 3 - queue: 3 - pkts_num_fill_ingr_min: 6 - pkts_num_trig_pfc: 4457 - pkts_num_trig_ingr_drp: 5140 - pkts_num_fill_egr_min: 16 - cell_size: 208 - xon_1: - dscp: 3 - ecn: 1 - pg: 3 - pkts_num_trig_pfc: 4457 - pkts_num_dismiss_pfc: 12 - xon_2: - dscp: 4 - ecn: 1 - pg: 4 - pkts_num_trig_pfc: 4457 - pkts_num_dismiss_pfc: 12 - ecn_1: - dscp: 8 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 208 - ecn_2: - dscp: 8 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - ecn_3: - dscp: 0 - ecn: 0 - num_of_pkts: 5000 - limit: 182000 - min_limit: 180000 - cell_size: 208 - ecn_4: - dscp: 0 - ecn: 1 - num_of_pkts: 2047 - limit: 182320 - min_limit: 0 - cell_size: 208 - lossy_queue_1: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_trig_egr_drp: 10692 - wrr: - ecn: 1 - q0_num_of_pkts: 140 - q1_num_of_pkts: 140 - q2_num_of_pkts: 140 - q3_num_of_pkts: 150 - q4_num_of_pkts: 150 - q5_num_of_pkts: 140 - q6_num_of_pkts: 140 - limit: 80 - wrr_chg: - ecn: 1 - q0_num_of_pkts: 80 - q1_num_of_pkts: 80 - q2_num_of_pkts: 80 - q3_num_of_pkts: 300 - q4_num_of_pkts: 300 - q5_num_of_pkts: 80 - q6_num_of_pkts: 80 - limit: 80 - lossy_weight: 8 - lossless_weight: 30 - wm_pg_shared_lossless: - dscp: 3 - 
ecn: 1 - pg: 3 - pkts_num_fill_min: 6 - pkts_num_trig_pfc: 4457 - packet_size: 64 - cell_size: 208 - wm_pg_shared_lossy: - dscp: 8 - ecn: 1 - pg: 0 - pkts_num_fill_min: 0 - pkts_num_trig_egr_drp: 10692 - packet_size: 64 - cell_size: 208 - wm_q_shared_lossy: - dscp: 8 - ecn: 1 - queue: 0 - pkts_num_fill_min: 8 - pkts_num_trig_egr_drp: 10692 - cell_size: 208 - wm_buf_pool_lossy: - dscp: 8 - ecn: 1 - pg: 0 - queue: 0 - pkts_num_fill_ingr_min: 0 - pkts_num_trig_egr_drp: 10692 - pkts_num_fill_egr_min: 16 - cell_size: 208 diff --git a/ansible/vars/qos.yml b/ansible/vars/qos.yml new file mode 120000 index 00000000000..4f5c2c8a026 --- /dev/null +++ b/ansible/vars/qos.yml @@ -0,0 +1 @@ +../../tests/qos/files/qos.yml \ No newline at end of file diff --git a/tests/pytest.ini b/tests/pytest.ini index 1da949457ba..b2d5d6d7858 100644 --- a/tests/pytest.ini +++ b/tests/pytest.ini @@ -1,5 +1,5 @@ [pytest] -addopts = -ra --module-path=../ansible/library/ --show-capture=no --ignore=ptftests --ignore=acstests --ignore=test_vrf.py +addopts = -ra --module-path=../ansible/library/ --show-capture=no --ignore=ptftests --ignore=acstests --ignore=saitests --ignore=test_vrf.py markers: acl: ACL tests bsl: BSL tests diff --git a/tests/qos/__init__.py b/tests/qos/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qos/args/__init__.py b/tests/qos/args/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/qos/args/qos_sai_args.py b/tests/qos/args/qos_sai_args.py new file mode 100644 index 00000000000..188089e28d2 --- /dev/null +++ b/tests/qos/args/qos_sai_args.py @@ -0,0 +1,54 @@ +# QoS-SAI Args file + +def add_qos_sai_args(parser): + """ + Adding arguments required for QoS SAI test cases + + Args: + parser: pytest parser object + + Returns: + None + """ + qos_group = parser.getgroup("QoS test suite options") + + qos_group.addoption( + "--ptf_portmap", + action="store", + type=str, + default=None, + help="PTF port index to DUT port 
alias map", + ) + + qos_group.addoption( + "--disable_test", + action="store", + type=bool, + default=True, + help="Control execution of buffer watermark experimental tests", + ) + + qos_group.addoption( + "--qos_swap_syncd", + action="store", + type=bool, + default=True, + help="Swap syncd container with syncd-rpc container", + ) + + qos_group.addoption( + "--qos_dst_ports", + action="store", + type=lambda opt_value: [int(v) for v in opt_value.translate(None, "[]").split(',')], + default=[0, 1, 3], + help="QoS SAI comma separated list of destination ports. Test currently expects exactly 3 destination ports", + ) + + qos_group.addoption( + "--qos_src_ports", + action="store", + type=lambda opt_value: [int(v) for v in opt_value.translate(None, "[]").split(',')], + default=[2], + help="QoS SAI comma separated list of source ports. Test currently expects exactly 1 source port", + ) + diff --git a/tests/qos/conftest.py b/tests/qos/conftest.py new file mode 100644 index 00000000000..6ac7f99ba90 --- /dev/null +++ b/tests/qos/conftest.py @@ -0,0 +1,14 @@ +from .args.qos_sai_args import add_qos_sai_args + +# WR-ARP pytest arguments +def pytest_addoption(parser): + ''' + Adds option to QoS pytest + + Args: + parser: pytest parser object + + Returns: + None + ''' + add_qos_sai_args(parser) diff --git a/tests/qos/files/mellanox/packets_aging.py b/tests/qos/files/mellanox/packets_aging.py new file mode 100755 index 00000000000..584962a3d26 --- /dev/null +++ b/tests/qos/files/mellanox/packets_aging.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +''' +This file contains Python script to enable/disable packets aging in queues(buffers?). 
+''' + +import sys, errno +import os +import argparse +from python_sdk_api.sx_api import (SX_STATUS_SUCCESS, + sx_api_open, + sx_api_port_device_get, + sx_api_port_sll_set, + sx_api_port_sll_get, + sx_api_port_hll_set, + new_sx_port_attributes_t_arr, + new_uint32_t_p, + uint32_t_p_assign, + uint32_t_p_value, + sx_port_attributes_t_arr_getitem) + +parser = argparse.ArgumentParser(description='Toggle Mellanox-specific packet aging on egress queues') +parser.add_argument('command', choices=['enable', 'disable'], type=str, help='Enable/Disable packet aging') +args = parser.parse_args() + +# Open SDK +rc, handle = sx_api_open(None) +if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "Failed to open api handle.\nPlease check that SDK is running." + sys.exit(errno.EACCES) + +# Get list of ports +port_attributes_list = new_sx_port_attributes_t_arr(64) +port_cnt_p = new_uint32_t_p() +uint32_t_p_assign(port_cnt_p, 64) + +rc = sx_api_port_device_get(handle, 1 , 0, port_attributes_list, port_cnt_p) +if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_device_get." + sys.exit() +port_cnt = uint32_t_p_value(port_cnt_p) + +set_mode = False +if args.command == "enable": # enable packets aging + sll_time = 0x418937 + hll_time = 0x83127 + hll_stall = 7 + set_mode = True +else: + assert args.command == "disable" # disable packets aging + sll_time = 0xffffffffffffffff + hll_time = 0xffffffff + hll_stall = 0 + set_mode = True + +if set_mode: + rc = sx_api_port_sll_set(handle, sll_time) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_sll_set." + sys.exit() +else: + sll_p = new_uint64_t_p() + rc = sx_api_port_sll_get(handle, sll_p) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_sll_get." 
+ sys.exit() + else: + sll = uint64_t_p_value(sll_p) + print >> sys.stderr, ("sll_max_time=0x%X" % sll) + +for i in range(0, port_cnt): + port_attributes = sx_port_attributes_t_arr_getitem(port_attributes_list,i) + log_port = int(port_attributes.log_port) + if log_port < 0xFFFFF: # only physical ports + if set_mode: + rc = sx_api_port_hll_set(handle, log_port, hll_time, hll_stall) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_hll_set." + sys.exit() + else: + hll_max_time_p = new_uint32_t_p() + hll_stall_cnt_p = new_uint32_t_p() + rc = sx_api_port_hll_get(handle,log_port, hll_max_time_p, hll_stall_cnt_p) + if (rc != SX_STATUS_SUCCESS): + print >> sys.stderr, "An error returned by sx_api_port_hll_set." + sys.exit() + else: + hll_max_time = uint32_t_p_value(hll_max_time_p) + hll_stall_cnt = uint32_t_p_value(hll_stall_cnt_p) + print >> sys.stderr, ("Port%d(Ethernet%d, logical:0x%X) hll_time:0x%X, hll_stall:0x%X" % + (port_attributes.port_mapping.module_port, (port_attributes.port_mapping.module_port * 4), + log_port, hll_max_time, hll_stall_cnt)) diff --git a/tests/qos/files/qos.yml b/tests/qos/files/qos.yml new file mode 100644 index 00000000000..32362506fcd --- /dev/null +++ b/tests/qos/files/qos.yml @@ -0,0 +1,728 @@ +--- + +# TBD for ACS-MSN2700 xon_1, xon_2: +# Once the new fw version with the fix is burned, should change the +# xon_th_pkts to 10687 +# xoff_th_pkts to 0 +# since xon_th and xoff_th are configured to the same value. 
+# The current parameters are according to current fw behavior +# +# ecn: +# Dictate the ECN field in assembling a packet +# 0 - not-ECT; 1 - ECT(1); 2 - ECT(0); 3 - CE +# +# ecn_* profile is for ECN limit test, which is removed +# +# Arista-7260CX3-D108C8: +# xoff_1 for 50G +# xoff_2 for 100G +qos_params: + spc1: + 40000_5m: + pkts_num_leak_out: 0 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22115 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22115 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22115 + cell_size: 96 + packet_size: 300 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 22115 + cell_size: 96 + 40000_40m: + pkts_num_leak_out: 0 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22190 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22190 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22038 + pkts_num_trig_ingr_drp: 22190 + cell_size: 96 + packet_size: 300 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 22190 + cell_size: 96 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 22038 + pkts_num_dismiss_pfc: 21847 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 22038 + pkts_num_dismiss_pfc: 21847 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 96 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 96 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_trig_egr_drp: 67965 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 
3 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 22038 + cell_size: 96 + packet_size: 300 + wm_pg_shared_lossy: + dscp: 1 + ecn: 1 + pg: 0 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 67965 + cell_size: 96 + packet_size: 300 + wm_q_shared_lossy: + dscp: 1 + ecn: 1 + queue: 1 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 67965 + cell_size: 96 + wrr: + ecn: 1 + q0_num_of_pkts: 600 + q1_num_of_pkts: 400 + q3_num_of_pkts: 500 + q4_num_of_pkts: 500 + limit: 80 + td2: + 40000_5m: + pkts_num_leak_out: 48 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + pkts_num_fill_egr_min: 0 + cell_size: 208 + 40000_300m: + pkts_num_leak_out: 48 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 5164 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 4898 + pkts_num_trig_ingr_drp: 5164 + pkts_num_fill_egr_min: 0 + cell_size: 208 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4898 + pkts_num_dismiss_pfc: 12 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4898 + pkts_num_dismiss_pfc: 12 + ecn_1: + dscp: 8 + ecn: 0 + 
num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_trig_egr_drp: 31322 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + wrr_chg: + ecn: 1 + q0_num_of_pkts: 80 + q1_num_of_pkts: 80 + q2_num_of_pkts: 80 + q3_num_of_pkts: 300 + q4_num_of_pkts: 300 + q5_num_of_pkts: 80 + q6_num_of_pkts: 80 + limit: 80 + lossy_weight: 8 + lossless_weight: 30 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 4898 + packet_size: 64 + cell_size: 208 + wm_pg_shared_lossy: + dscp: 1 + ecn: 1 + pg: 0 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 31322 + packet_size: 64 + cell_size: 208 + wm_q_shared_lossy: + dscp: 1 + ecn: 1 + queue: 1 + pkts_num_fill_min: 8 + pkts_num_trig_egr_drp: 31322 + cell_size: 208 + wm_buf_pool_lossy: + dscp: 8 + ecn: 1 + pg: 0 + queue: 0 + pkts_num_fill_ingr_min: 0 + pkts_num_trig_egr_drp: 31322 + pkts_num_fill_egr_min: 8 + cell_size: 208 + th: + 40000_300m: + pkts_num_leak_out: 19 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [25, 26, 27, 40, 41] + dst_port_id: 24 + pgs_num: 10 + pkts_num_trig_pfc: 1194 + pkts_num_hdrm_full: 520 + pkts_num_hdrm_partial: 361 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 
1 + queue: 3 + pkts_num_fill_min: 8 + pkts_num_trig_ingr_drp: 7063 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7063 + pkts_num_fill_egr_min: 8 + cell_size: 208 + 100000_300m: + pkts_num_leak_out: 36 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [17, 18] + dst_port_id: 16 + pgs_num: 4 + pkts_num_trig_pfc: 2620 + pkts_num_hdrm_full: 1292 + pkts_num_hdrm_partial: 1165 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 6542 + cell_size: 208 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 8 + pkts_num_trig_ingr_drp: 7835 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 6542 + pkts_num_trig_ingr_drp: 7835 + pkts_num_fill_egr_min: 8 + cell_size: 208 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 6542 + pkts_num_dismiss_pfc: 11 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 6542 + pkts_num_dismiss_pfc: 11 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_trig_egr_drp: 9887 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 
150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + wrr_chg: + ecn: 1 + q0_num_of_pkts: 80 + q1_num_of_pkts: 80 + q2_num_of_pkts: 80 + q3_num_of_pkts: 300 + q4_num_of_pkts: 300 + q5_num_of_pkts: 80 + q6_num_of_pkts: 80 + limit: 80 + lossy_weight: 8 + lossless_weight: 30 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 6542 + packet_size: 64 + cell_size: 208 + wm_pg_shared_lossy: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 9887 + packet_size: 64 + cell_size: 208 + wm_q_shared_lossy: + dscp: 8 + ecn: 1 + queue: 0 + pkts_num_fill_min: 8 + pkts_num_trig_egr_drp: 9887 + cell_size: 208 + wm_buf_pool_lossy: + dscp: 8 + ecn: 1 + pg: 0 + queue: 0 + pkts_num_fill_ingr_min: 0 + pkts_num_trig_egr_drp: 9887 + pkts_num_fill_egr_min: 8 + cell_size: 208 + th2: + 40000_300m: + pkts_num_leak_out: 0 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 4978 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 4978 + hdrm_pool_size: + dscps: [3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [6, 7, 8, 9, 10, 38, 39, 40, 41, 42] + dst_port_id: 32 + pgs_num: 19 + pkts_num_trig_pfc: 1490 + pkts_num_hdrm_full: 520 + pkts_num_hdrm_partial: 47 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 4978 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 4978 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 4978 + pkts_num_fill_egr_min: 16 + cell_size: 208 + 50000_300m: + pkts_num_leak_out: 0 + xoff_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 5140 + xoff_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 5140 + hdrm_pool_size: + dscps: 
[3, 4] + ecn: 1 + pgs: [3, 4] + src_port_ids: [1, 2, 3, 4, 5, 6, 7] + dst_port_id: 0 + pgs_num: 14 + pkts_num_trig_pfc: 1826 + pkts_num_hdrm_full: 682 + pkts_num_hdrm_partial: 542 + wm_pg_headroom: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 5140 + cell_size: 208 + wm_q_shared_lossless: + dscp: 3 + ecn: 1 + queue: 3 + pkts_num_fill_min: 0 + pkts_num_trig_ingr_drp: 5140 + cell_size: 208 + wm_buf_pool_lossless: + dscp: 3 + ecn: 1 + pg: 3 + queue: 3 + pkts_num_fill_ingr_min: 6 + pkts_num_trig_pfc: 4457 + pkts_num_trig_ingr_drp: 5140 + pkts_num_fill_egr_min: 16 + cell_size: 208 + xon_1: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_trig_pfc: 4457 + pkts_num_dismiss_pfc: 12 + xon_2: + dscp: 4 + ecn: 1 + pg: 4 + pkts_num_trig_pfc: 4457 + pkts_num_dismiss_pfc: 12 + ecn_1: + dscp: 8 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_2: + dscp: 8 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + ecn_3: + dscp: 0 + ecn: 0 + num_of_pkts: 5000 + limit: 182000 + min_limit: 180000 + cell_size: 208 + ecn_4: + dscp: 0 + ecn: 1 + num_of_pkts: 2047 + limit: 182320 + min_limit: 0 + cell_size: 208 + lossy_queue_1: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_trig_egr_drp: 10692 + wrr: + ecn: 1 + q0_num_of_pkts: 140 + q1_num_of_pkts: 140 + q2_num_of_pkts: 140 + q3_num_of_pkts: 150 + q4_num_of_pkts: 150 + q5_num_of_pkts: 140 + q6_num_of_pkts: 140 + limit: 80 + wrr_chg: + ecn: 1 + q0_num_of_pkts: 80 + q1_num_of_pkts: 80 + q2_num_of_pkts: 80 + q3_num_of_pkts: 300 + q4_num_of_pkts: 300 + q5_num_of_pkts: 80 + q6_num_of_pkts: 80 + limit: 80 + lossy_weight: 8 + lossless_weight: 30 + wm_pg_shared_lossless: + dscp: 3 + ecn: 1 + pg: 3 + pkts_num_fill_min: 6 + pkts_num_trig_pfc: 4457 + packet_size: 64 + cell_size: 208 + wm_pg_shared_lossy: + dscp: 8 + ecn: 1 + pg: 0 + pkts_num_fill_min: 0 + pkts_num_trig_egr_drp: 10692 + packet_size: 64 + cell_size: 208 + wm_q_shared_lossy: + dscp: 8 + ecn: 1 + queue: 0 + 
pkts_num_fill_min: 8 + pkts_num_trig_egr_drp: 10692 + cell_size: 208 + wm_buf_pool_lossy: + dscp: 8 + ecn: 1 + pg: 0 + queue: 0 + pkts_num_fill_ingr_min: 0 + pkts_num_trig_egr_drp: 10692 + pkts_num_fill_egr_min: 16 + cell_size: 208 diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py new file mode 100644 index 00000000000..f227e4b9385 --- /dev/null +++ b/tests/qos/qos_sai_base.py @@ -0,0 +1,838 @@ +import ipaddress +import logging +import pytest +import re +import yaml + +from common.mellanox_data import is_mellanox_device as isMellanoxDevice +from common.system_utils import docker + +logger = logging.getLogger(__name__) + +class QosSaiBase: + """ + QosSaiBase contains collection of pytest fixtures that ready the tesbed for QoS SAI test cases. + """ + SUPPORTED_T0_TOPOS = ["t0", "t0-64", "t0-116"] + SUPPORTED_PTF_TOPOS = ['ptf32', 'ptf64'] + SUPPORTED_ASIC_LIST = ["td2", "th", "th2", "spc1", "spc2", "spc3"] + TARGET_QUEUE_WRED = 3 + TARGET_LOSSY_QUEUE_SCHED = 0 + TARGET_LOSSLESS_QUEUE_SCHED = 3 + DEFAULT_PORT_INDEX_TO_ALIAS_MAP_FILE = "/tmp/default_interface_to_front_map.ini" + + def __runRedisCommandOrAssert(self, duthost, argv=[]): + """ + Runs Redis command on DUT host. + + The method asserts if the command fails. 
+ + Args: + duthost (AnsibleHost): Device Under Test (DUT) + argv (list): List of commands to run on duthost + + Returns: + stdout (list): List of stdout lines spewed by the invoked command + """ + result = duthost.shell(argv=argv) + assert result["rc"] == 0, \ + "Failed to run Redis command '{0}' with error '{1}'".format(" ".join(argv), result["stderr"]) + + return result["stdout_lines"] + + def __computeBufferThreshold(self, duthost, bufferProfile): + """ + Computes buffer threshold for dynamic threshold profiles + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + bufferProfile (dict, inout): Map of puffer profile attributes + + Returns: + Updates bufferProfile with computed buffer threshold + """ + pool = bufferProfile["pool"].encode("utf-8").translate(None, "[]") + bufferSize = int(self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", pool, "size"] + )[0]) + bufferScale = 2**float(bufferProfile["dynamic_th"]) + bufferScale /= (bufferScale + 1) + bufferProfile.update({"static_th": int(bufferProfile["size"]) + int(bufferScale * bufferSize)}) + + def __updateVoidRoidParams(self, duthost, bufferProfile): + """ + Updates buffer profile with VOID/ROID params + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + bufferProfile (dict, inout): Map of puffer profile attributes + + Returns: + Updates bufferProfile with VOID/ROID obtained from Redis db + """ + bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(None, "[]").replace("BUFFER_POOL|",'') + + bufferPoolVoid = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "2", "HGET", "COUNTERS_BUFFER_POOL_NAME_MAP", bufferPoolName] + )[0].encode("utf-8") + bufferProfile.update({"bufferPoolVoid": bufferPoolVoid}) + + bufferPoolRoid = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "1", "HGET", "VIDTORID", bufferPoolVoid] + )[0].encode("utf-8").replace("oid:",'') + bufferProfile.update({"bufferPoolRoid": bufferPoolRoid}) + 
+ def __getBufferProfile(self, request, duthost, table, port, priorityGroup): + """ + Get buffer profile attribute from Redis db + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + table (str): Redis table name + port (str): DUT port alias + priorityGroup (str): QoS priority group + + Returns: + bufferProfile (dict): Map of buffer profile attributes + """ + bufferProfileName = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", "{0}|{1}|{2}".format(table, port, priorityGroup), "profile"] + )[0].encode("utf-8").translate(None, "[]") + + result = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGETALL", bufferProfileName] + ) + it = iter(result) + bufferProfile = dict(zip(it, it)) + bufferProfile.update({"profileName": bufferProfileName}) + + # Update profile static threshold value if profile threshold is dynamic + if "dynamic_th" in bufferProfile.keys(): + self.__computeBufferThreshold(duthost, bufferProfile) + + if "pg_lossless" in bufferProfileName: + assert "xon" in bufferProfile.keys() and "xoff" in bufferProfile.keys(), \ + "Could not find xon and/or xoff values for profile '{0}'".format(bufferProfileName) + + disableTest = request.config.getoption("--disable_test") + if not disableTest: + self.__updateVoidRoidParams(duthost, bufferProfile) + + return bufferProfile + + def __getEcnWredParam(self, duthost, table, port): + """ + Get ECN/WRED parameters from Redis db + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + table (str): Redis table name + port (str): DUT port alias + + Returns: + wredProfile (dict): Map of ECN/WRED attributes + """ + wredProfileName = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", "{0}|{1}|{2}".format(table, port, self.TARGET_QUEUE_WRED), "wred_profile"] + )[0].encode("utf-8").translate(None, "[]") + + result = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", 
"-n", "4", "HGETALL", wredProfileName] + ) + it = iter(result) + wredProfile = dict(zip(it, it)) + + return wredProfile + + def __getWatermarkStatus(self, duthost): + """ + Get watermark status from Redis db + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + watermarkStatus (str): Watermark status + """ + watermarkStatus = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", "FLEX_COUNTER_TABLE|QUEUE_WATERMARK", "FLEX_COUNTER_STATUS"] + )[0].encode("utf-8") + + return watermarkStatus + + def __getSchedulerParam(self, duthost, port, queue): + """ + Get scheduler parameters from Redis db + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + port (str): DUT port alias + queue (str): QoS queue + + Returns: + SchedulerParam (dict): Map of scheduler parameters + """ + schedProfile = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", "QUEUE|{0}|{1}".format(port, queue), "scheduler"] + )[0].encode("utf-8").translate(None, "[]") + + schedWeight = self.__runRedisCommandOrAssert( + duthost, + argv = ["redis-cli", "-n", "4", "HGET", schedProfile, "weight"] + )[0].encode("utf-8") + + return {"schedProfile": schedProfile, "schedWeight": schedWeight} + + def __assignTestPortIps(self, mgFacts): + """ + Assign IPs to test ports of DUT host + + Args: + mgFacts (dict): Map of DUT minigraph facts + + Returns: + dutPortIps (dict): Map of port index to IPs + """ + dutPortIps = {} + if len(mgFacts["minigraph_vlans"]) > 0: + #TODO: handle the case when there are multiple vlans + testVlan = next(iter(mgFacts["minigraph_vlans"])) + testVlanMembers = mgFacts["minigraph_vlans"][testVlan]["members"] + + testVlanIp = None + for vlan in mgFacts["minigraph_vlan_interfaces"]: + if mgFacts["minigraph_vlans"][testVlan]["name"] in vlan["attachto"]: + testVlanIp = ipaddress.ip_address(unicode(vlan["addr"])) + break + assert testVlanIp, "Failed to obtain vlan IP" + + for i in range(len(testVlanMembers)): + 
portIndex = mgFacts["minigraph_port_indices"][testVlanMembers[i]] + dutPortIps.update({portIndex: str(testVlanIp + portIndex + 1)}) + + return dutPortIps + + def __buildTestPorts(self, request, testPortIds, testPortIps): + """ + Build map of test ports index and IPs + + Args: + request (Fixture): pytest request object + testPortIds (list): List of QoS SAI test port IDs + testPortIps (list): List of QoS SAI test port IPs + + Returns: + testPorts (dict): Map of test ports index and IPs + """ + dstPorts = request.config.getoption("--qos_dst_ports") + srcPorts = request.config.getoption("--qos_src_ports") + + assert len(set(dstPorts).intersection(set(srcPorts))) == 0, \ + "Duplicate destination and source ports '{0}'".format(set(dstPorts).intersection(set(srcPorts))) + + assert len(dstPorts) == 3 and len(srcPorts) == 1, \ + "Invalid number of ports provided, qos_dst_ports:{0}, qos_src_ports:{1}".format(len(dstPorts), len(srcPorts)) + + #TODO: Randomize port selection + return { + "dst_port_id": testPortIds[dstPorts[0]], + "dst_port_ip": testPortIps[testPortIds[dstPorts[0]]], + "dst_port_2_id": testPortIds[dstPorts[1]], + "dst_port_2_ip": testPortIps[testPortIds[dstPorts[1]]], + 'dst_port_3_id': testPortIds[dstPorts[2]], + "dst_port_3_ip": testPortIps[testPortIds[dstPorts[2]]], + "src_port_id": testPortIds[srcPorts[0]], + "src_port_ip": testPortIps[testPortIds[srcPorts[0]]], + } + + def runPtfTest(self, ptfhost, testCase='', testParams={}): + """ + Runs QoS SAI test case on PTF host + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + testCase (str): SAI tests test case name + testParams (dict): Map of test params required by testCase + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + assert ptfhost.shell( + argv = [ + "ptf", + "--test-dir", + "saitests", + testCase, + "--platform-dir", + "ptftests", + "--platform", + "remote", + "-t", + ";".join(["{}={}".format(k, repr(v)) for k, v in testParams.items()]), + "--disable-ipv6", + 
"--disable-vxlan", + "--disable-geneve", + "--disable-erspan", + "--disable-mpls", + "--disable-nvgre", + "--log-file", + "/tmp/{0}.log".format(testCase), + ], + chdir = "/root", + )["rc"] == 0, "Failed when running test '{0}'".format(testCase) + + @pytest.fixture(scope='class') + def swapSyncd(self, request, duthost): + """ + Swap syncd on DUT host + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + None + """ + swapSyncd = request.config.getoption("--qos_swap_syncd") + if swapSyncd: + docker.swap_syncd(duthost) + + yield + + if swapSyncd: + docker.restore_default_syncd(duthost) + + @pytest.fixture(scope='class', autouse=True) + def dutConfig(self, request, duthost): + """ + Build DUT host config pertaining to QoS SAI tests + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + dutConfig (dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, and + test ports + """ + dutLagInterfaces = [] + mgFacts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + + for _, lag in mgFacts["minigraph_portchannels"].items(): + for intf in lag["members"]: + dutLagInterfaces.append(mgFacts["minigraph_port_indices"][intf]) + + testPortIds = set(mgFacts["minigraph_port_indices"][port] for port in mgFacts["minigraph_ports"].keys()) + testPortIds -= set(dutLagInterfaces) + if isMellanoxDevice(duthost): + # The last port is used for up link from DUT switch + testPortIds -= {len(mgFacts["minigraph_port_indices"]) - 1} + testPortIds = sorted(testPortIds) + + # get current DUT port IPs + dutPortIps = {} + for portConfig in mgFacts["minigraph_interfaces"]: + if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4: + portIndex = mgFacts["minigraph_port_indices"][portConfig["attachto"]] + if portIndex in testPortIds: + dutPortIps.update({portIndex: portConfig["peer_addr"]}) + + testPortIps = 
self.__assignTestPortIps(mgFacts) + # restore currently assigned IPs + testPortIps.update(dutPortIps) + + testPorts = self.__buildTestPorts(request, testPortIds, testPortIps) + yield { + "dutInterfaces" : {index: port for port, index in mgFacts["minigraph_port_indices"].items()}, + "testPortIds": testPortIds, + "testPortIps": testPortIps, + "testPorts": testPorts, + } + + @pytest.fixture(scope='class') + def updateIptables(self, duthost, swapSyncd): + """ + Update iptables on DUT host with drop rule for BGP SYNC packets + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + swapSyncd (Fixture): swapSyncd fixture is required to run prior to updating iptables + + Returns: + None + """ + def updateIptablesDropRule(duthost, ipVersion, state='present'): + duthost.iptables( + ip_version=ipVersion, + action="Append", + chain="INPUT", + jump="DROP", + protocol="tcp", + destination_port="bgp", + state=state + ) + + ipVersions = [{"ipVersion": "ipv4"}, {"ipVersion": "ipv6"}] + + logger.info("Add ip[6]tables rule to drop BGP SYN Packet from peer so that we do not ACK back") + for ipVersion in ipVersions: + updateIptablesDropRule(duthost, state="present", **ipVersion) + + yield + + logger.info("Remove ip[6]tables rule to drop BGP SYN Packet from Peer") + for ipVersion in ipVersions: + updateIptablesDropRule(duthost, state="absent", **ipVersion) + + @pytest.fixture(scope='class') + def stopServices(self, duthost, swapSyncd): + """ + Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + swapSyncd (Fxiture): swapSyncd fixture is required to run prior to stopping services + + Returns: + None + """ + def updateDockerService(host, docker="", action="", service=""): + """ + Helper function to update docker services + + Args: + host (AnsibleHost): Ansible host that is running docker + docker (str): docker container name + action (str): action to apply to service running within docker + service (str): 
service name running within docker + + Returns: + None + """ + host.command( + "docker exec {docker} supervisorctl {action} {service}".format( + docker=docker, + action=action, + service=service + ) + ) + + services = [ + {"docker": "lldp", "service": "lldp-syncd"}, + {"docker": "lldp", "service": "lldpd"}, + {"docker": "bgp", "service": "bgpd"}, + ] + + logger.info("Stop lldp, lldp-syncd, and bgpd services") + for service in services: + updateDockerService(duthost, action="stop", **service) + + yield + + logger.info("Start lldp, lldp-syncd, and bgpd services") + for service in services: + updateDockerService(duthost, action="start", **service) + + @pytest.fixture(scope='class', autouse=True) + def disablePacketAging(self, duthost, stopServices): + """ + disable packet aging on DUT host + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + stopServices (Fxiture): stopServices fixture is required to run prior to disabling packet aging + + Returns: + None + """ + if isMellanoxDevice(duthost): + logger.info("Disable Mellanox packet aging") + duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp") + duthost.command("docker cp /tmp/packets_aging.py syncd:/") + duthost.command("docker exec syncd python /packets_aging.py disable") + + yield + + if isMellanoxDevice(duthost): + logger.info("Enable Mellanox packet aging") + duthost.command("docker exec syncd python /packets_aging.py enable") + duthost.command("docker exec syncd rm -rf /packets_aging.py") + + @pytest.fixture(scope='class', autouse=True) + def dutQosConfig(self, duthost, ingressLosslessProfile): + """ + Prepares DUT host QoS configuration + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ingressLosslessProfile (Fxiture): ingressLosslessProfile fixture is required to run prior to collecting + QoS configuration + + Returns: + QoSConfig (dict): Map containing DUT host QoS configuration + """ + mgFacts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + assert 
"minigraph_hwsku" in mgFacts, "Could not find DUT SKU" + + profileName = ingressLosslessProfile["profileName"] + m = re.search("^BUFFER_PROFILE\|pg_lossless_(.*)_profile$", profileName) + assert m.group(1), "Cannot find port speed/cable length" + + portSpeedCableLength = m.group(1) + + qosConfigs = {} + with open(r"qos/files/qos.yml") as file: + qosConfigs = yaml.load(file, Loader=yaml.FullLoader) + + vendor = duthost.get_asic_type() + hostvars = duthost.host.options['variable_manager']._hostvars[duthost.hostname] + dutAsic = None + for asic in self.SUPPORTED_ASIC_LIST: + vendorAsic = "{0}_{1}_hwskus".format(vendor, asic) + if vendorAsic in hostvars.keys() and mgFacts["minigraph_hwsku"] in hostvars[vendorAsic]: + dutAsic = asic + break + assert dutAsic, "Cannot identify DUT ASIC type" + + yield { + "param": qosConfigs["qos_params"][dutAsic], + "portSpeedCableLength": portSpeedCableLength, + } + + @pytest.fixture(scope='class', autouse=True) + def copyPtfDirectory(self, ptfhost): + """ + Copys PTF directory to PTF host. 
This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + """ + ptfhost.copy(src="ptftests", dest="/root") + + @pytest.fixture(scope='class') + def ptfPortMapFile(self, request, duthost, ptfhost): + """ + Prepares and copies port map file to PTF host + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + filename (str): returns the filename copied to PTF host + """ + ptfhost.copy(src="ptftests", dest="/root") + portMapFile = request.config.getoption("--ptf_portmap") + if not portMapFile: + portMapFile = self.DEFAULT_PORT_INDEX_TO_ALIAS_MAP_FILE + mgFacts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"] + with open(portMapFile, 'w') as file: + file.write("# ptf host interface @ switch front port name\n") + file.writelines( + map( + lambda (port, index): "{0}@{1}\n".format(index, port), + mgFacts["minigraph_port_indices"].items() + ) + ) + + ptfhost.copy(src=portMapFile, dest="/root/") + + yield "/root/{}".format(portMapFile.split('/')[-1]) + + @pytest.fixture(scope='class', autouse=True) + def copySaiTests(self, ptfhost): + """ + Copies SAI directory to PTF host. 
This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + """ + ptfhost.copy(src="saitests", dest="/root") + + @pytest.fixture(scope='class', autouse=True) + def dutTestParams(self, duthost, testbed, ptfPortMapFile): + """ + Prepares DUT host test params + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + testbed (Fixture, dict): Map containing testbed information + ptfPortMapFile (Fixture, str): filename residing on PTF host that contains port map information + + Returns: + dutTestParams (dict): DUT host test params + """ + dutFacts = duthost.setup()['ansible_facts'] + mgFacts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + topo = testbed["topo"]["name"] + + yield { + "topo": topo, + "hwsku": mgFacts["minigraph_hwsku"], + "basicParams": { + "router_mac": '' if topo in self.SUPPORTED_T0_TOPOS else dutFacts['ansible_Ethernet0']['macaddress'], + "server": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host'], + "port_map_file": ptfPortMapFile, + "sonic_asic_type": duthost.facts['asic_type'], + } + } + + @pytest.fixture(scope='class', autouse=True) + def changePtfhostMacAddresses(self, ptfhost): + """ + Change MAC addresses (unique) on PTF host. 
This class-scope fixture runs once before test start + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + + Returns: + None + """ + ptfhost.script("scripts/change_mac.sh") + + @pytest.fixture(scope='class') + def releaseAllPorts(self, ptfhost, dutTestParams, updateIptables): + """ + Release all paused ports prior to running QoS SAI test cases + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + updateIptables (Fixture, dict): updateIptables to run prior to releasing paused ports + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + self.runPtfTest(ptfhost, testCase="sai_qos_tests.ReleaseAllPorts", testParams=dutTestParams["basicParams"]) + + @pytest.fixture(scope='class', autouse=True) + def populateArpEntries(self, duthost, ptfhost, dutTestParams, dutConfig, releaseAllPorts): + """ + Update ARP entries of QoS SAI test ports + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + releaseAllPorts (Fixture, dict): releaseAllPorts to run prior to updating ARP entries + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + saiQosTest = None + if dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS: + saiQosTest = "sai_qos_tests.ARPpopulate" + elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS: + saiQosTest = "sai_qos_tests.ARPpopulatePTF" + else: + result = duthost.command(argv = ["arp", "-n"]) + assert result["rc"] == 0, "failed to run arp command on {0}".format(duthost.hostname) + if result["stdout"].find("incomplete") == -1: + saiQosTest = "sai_qos_tests.ARPpopulate" + + if saiQosTest: + testParams = dutTestParams["basicParams"] + testParams.update(dutConfig["testPorts"]) + self.runPtfTest(ptfhost, 
testCase=saiQosTest, testParams=testParams) + + @pytest.fixture(scope='class', autouse=True) + def ingressLosslessProfile(self, request, duthost, dutConfig): + """ + Retreives ingress lossless profile + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + ingressLosslessProfile (dict): Map of ingress lossless buffer profile attributes + """ + yield self.__getBufferProfile( + request, + duthost, + "BUFFER_PG", + dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], + "3-4" + ) + + @pytest.fixture(scope='class', autouse=True) + def ingressLossyProfile(self, request, duthost, dutConfig): + """ + Retreives ingress lossy profile + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + ingressLossyProfile (dict): Map of ingress lossy buffer profile attributes + """ + yield self.__getBufferProfile( + request, + duthost, + "BUFFER_PG", + dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], + "0" + ) + + @pytest.fixture(scope='class', autouse=True) + def egressLosslessProfile(self, request, duthost, dutConfig): + """ + Retreives egress lossless profile + + Args: + request (Fixture): pytest request object + duthost (AnsibleHost): Device Under Test (DUT) + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + egressLosslessProfile (dict): Map of egress lossless buffer profile attributes + """ + yield self.__getBufferProfile( + request, + duthost, + "BUFFER_QUEUE", + dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], + "3-4" + ) + + @pytest.fixture(scope='class') + def losslessSchedProfile(self, 
duthost, dutConfig): + """ + Retreives lossless scheduler profile + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + losslessSchedProfile (dict): Map of scheduler parameters + """ + yield self.__getSchedulerParam( + duthost, + dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], + self.TARGET_LOSSLESS_QUEUE_SCHED + ) + + @pytest.fixture(scope='class') + def lossySchedProfile(self, duthost, dutConfig): + """ + Retreives lossy scheduler profile + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + lossySchedProfile (dict): Map of scheduler parameters + """ + yield self.__getSchedulerParam( + duthost, + dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]], + self.TARGET_LOSSY_QUEUE_SCHED + ) + + @pytest.fixture + def updateSchedProfile(self, duthost, dutQosConfig, losslessSchedProfile, lossySchedProfile): + """ + Updates lossless/lossy scheduler profiles + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + losslessSchedProfile (Fixture, dict): Map of lossless scheduler parameters + lossySchedProfile (Fixture, dict): Map of lossy scheduler parameters + + Returns: + None + """ + def updateRedisSchedParam(schedParam): + """ + Helper function to updates lossless/lossy scheduler profiles + + Args: + schedParam (dict): Scheduler params to be set + + Returns: + None + """ + self.__runRedisCommandOrAssert( + duthost, + argv = [ + "redis-cli", + "-n", + "4", + "HSET", + schedParam["profile"], + "weight", + schedParam["qosConfig"] + ] + ) + + wrrSchedParams = [ + {"profile": lossySchedProfile["schedProfile"], "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossy_weight"]}, + {"profile": 
losslessSchedProfile["schedProfile"], "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossless_weight"]}, + ] + + for schedParam in wrrSchedParams: + updateRedisSchedParam(schedParam) + + yield + + schedProfileParams = [ + {"profile": lossySchedProfile["schedProfile"], "qosConfig": lossySchedProfile["schedWeight"]}, + {"profile": losslessSchedProfile["schedProfile"], "qosConfig": losslessSchedProfile["schedWeight"]}, + ] + + for schedParam in schedProfileParams: + updateRedisSchedParam(schedParam) + + @pytest.fixture + def resetWatermark(self, duthost): + """ + Reset queue watermark + + Args: + duthost (AnsibleHost): Device Under Test (DUT) + + Returns: + None + """ + duthost.shell("counterpoll watermark enable") + duthost.shell("sleep 20") + duthost.shell("counterpoll watermark disable") diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py new file mode 100644 index 00000000000..833305d9a35 --- /dev/null +++ b/tests/qos/test_qos_sai.py @@ -0,0 +1,465 @@ +""" + QoS SAI feature in SONiC. + + Parameters: + --ptf_portmap (str): file name of port index to DUT interface alias map. Default is None. + In case of a filename is not provided, a file containing port indeces to aliases map will be generated. + + --disable_test (bool): Disables experimental QoS SAI test cases. Deafult is True + + --qos_swap_syncd (bool): Used to install the RPC syncd image before running the tests. Default is True. + + --qos_dst_ports (list) Indeces of available DUT test ports to serve as destination ports. Note, This is not port + index on DUT, rather an index into filtered (excludes lag member ports) DUT ports. Plan is to randomize port + selection. Default is [0, 1, 3] + + --qos_src_ports (list) Indeces of available DUT test ports to serve as source port. Similar note as in + qos_dst_ports applies. 
Default is [2] +""" + +import logging +import pytest + +from qos_sai_base import QosSaiBase + +logger = logging.getLogger(__name__) + +class TestQosSai(QosSaiBase): + """ + TestQosSai derives from QosSaiBase and contains collection of QoS SAI test cases. + """ + SUPPORTED_PGSHARED_WATERMARK_SKUS = ['Arista-7260CX3-Q64', 'Arista-7260CX3-D108C8'] + SUPPORTED_HEADROOM_SKUS = [ + 'Arista-7060CX-32S-C32', + 'Celestica-DX010-C32', + 'Arista-7260CX3-D108C8', + 'Force10-S6100', + 'Arista-7260CX3-Q64' + ] + + @pytest.mark.parametrize("xoffProfile", ["xoff_1", "xoff_2"]) + def testQosSaiPfcXoffLimit(self, xoffProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + ingressLosslessProfile, egressLosslessProfile): + """ + Test QoS SAI XOFF limits + + Args: + xoffProfile (pytest parameter): XOFF profile + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + ingressLosslessProfile (Fxiture): Map of egress lossless buffer profile attributes + egressLosslessProfile (Fxiture): Map of egress lossless buffer profile attributes + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"][portSpeedCableLength] + testParams = { + "dscp": qosConfig[xoffProfile]["dscp"], + "ecn": qosConfig[xoffProfile]["ecn"], + "pg": qosConfig[xoffProfile]["pg"], + "buffer_max_size": ingressLosslessProfile["size"], + "queue_max_size": egressLosslessProfile["static_th"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], 
+ "pkts_num_trig_pfc": qosConfig[xoffProfile]["pkts_num_trig_pfc"], + "pkts_num_trig_ingr_drp": qosConfig[xoffProfile]["pkts_num_trig_ingr_drp"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.PFCtest", testParams=testParams) + + @pytest.mark.parametrize("xonProfile", ["xon_1", "xon_2"]) + def testQosSaiPfcXonLimit(self, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + ingressLosslessProfile): + """ + Test QoS SAI XON limits + + Args: + xonProfile (pytest parameter): XON profile + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + ingressLosslessProfile (Fxiture): Map of egress lossless buffer profile attributes + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"] + testParams = { + "dscp": qosConfig[xonProfile]["dscp"], + "ecn": qosConfig[xonProfile]["ecn"], + "pg": qosConfig[xonProfile]["pg"], + "buffer_max_size": ingressLosslessProfile["size"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "dst_port_2_id": dutConfig["testPorts"]["dst_port_2_id"], + "dst_port_2_ip": dutConfig["testPorts"]["dst_port_2_ip"], + "dst_port_3_id": dutConfig["testPorts"]["dst_port_3_id"], + "dst_port_3_ip": dutConfig["testPorts"]["dst_port_3_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"], + "pkts_num_trig_pfc": qosConfig[xonProfile]["pkts_num_trig_pfc"], + "pkts_num_dismiss_pfc": qosConfig[xonProfile]["pkts_num_dismiss_pfc"], + } + 
testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.PFCXonTest", testParams=testParams) + + def testQosSaiHeadroomPoolSize(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile): + """ + Test QoS SAI Headroom pool size + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + ingressLosslessProfile (Fxiture): Map of egress lossless buffer profile attributes + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + if dutTestParams["hwsku"] not in self.SUPPORTED_HEADROOM_SKUS: + pytest.skip("Headroom pool size not supported") + + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"][portSpeedCableLength] + testPortIps = dutConfig["testPortIps"] + + testParams = { + "testbed_type": dutTestParams["topo"], + "dscps": qosConfig["hdrm_pool_size"]["dscps"], + "ecn": qosConfig["hdrm_pool_size"]["ecn"], + "pgs": qosConfig["hdrm_pool_size"]["pgs"], + "src_port_ids": qosConfig["hdrm_pool_size"]["src_port_ids"], + "src_port_ips": [testPortIps[port] for port in qosConfig["hdrm_pool_size"]["src_port_ids"]], + "dst_port_id": qosConfig["hdrm_pool_size"]["dst_port_id"], + "dst_port_ip": testPortIps[qosConfig["hdrm_pool_size"]["dst_port_id"]], + "pgs_num": qosConfig["hdrm_pool_size"]["pgs_num"], + "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], + "pkts_num_trig_pfc": qosConfig["hdrm_pool_size"]["pkts_num_trig_pfc"], + "pkts_num_hdrm_full": qosConfig["hdrm_pool_size"]["pkts_num_hdrm_full"], + "pkts_num_hdrm_partial": qosConfig["hdrm_pool_size"]["pkts_num_hdrm_partial"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest", 
testParams=testParams) + + def testQosSaiLossyQueue(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLossyProfile): + """ + Test QoS SAI Lossy queue, shared buffer dynamic allocation + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + ingressLosslessProfile (Fxiture): Map of egress lossless buffer profile attributes + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"] + + testParams = { + "dscp": qosConfig["lossy_queue_1"]["dscp"], + "ecn": qosConfig["lossy_queue_1"]["ecn"], + "pg": qosConfig["lossy_queue_1"]["pg"], + "buffer_max_size": ingressLossyProfile["static_th"], + "headroom_size": ingressLossyProfile["size"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "dst_port_2_id": dutConfig["testPorts"]["dst_port_2_id"], + "dst_port_2_ip": dutConfig["testPorts"]["dst_port_2_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"], + "pkts_num_trig_egr_drp": qosConfig["lossy_queue_1"]["pkts_num_trig_egr_drp"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.LossyQueueTest", testParams=testParams) + + def testQosSaiDscpQueueMapping(self, ptfhost, dutTestParams, dutConfig): + """ + Test QoS SAI DSCP to queue mapping + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, 
test port IPs, + and test ports + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + testParams = { + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.DscpMappingPB", testParams=testParams) + + def testQosSaiDwrr(self, ptfhost, dutTestParams, dutConfig, dutQosConfig): + """ + Test QoS SAI DWRR + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"] + + testParams = { + "ecn": qosConfig["lossy_queue_1"]["ecn"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "q0_num_of_pkts": qosConfig["wrr"]["q0_num_of_pkts"], + "q1_num_of_pkts": qosConfig["wrr"]["q1_num_of_pkts"], + "q2_num_of_pkts": qosConfig["wrr"]["q2_num_of_pkts"], + "q3_num_of_pkts": qosConfig["wrr"]["q3_num_of_pkts"], + "q4_num_of_pkts": qosConfig["wrr"]["q4_num_of_pkts"], + "q5_num_of_pkts": qosConfig["wrr"]["q5_num_of_pkts"], + "q6_num_of_pkts": qosConfig["wrr"]["q6_num_of_pkts"], + "limit": qosConfig["wrr"]["limit"], + "pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.WRRtest", 
testParams=testParams) + + @pytest.mark.parametrize("pgProfile", ["wm_pg_shared_lossless", "wm_pg_shared_lossy"]) + def testQosSaiPgSharedWatermark(self, pgProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, + resetWatermark): + """ + Test QoS SAI PG shared watermark test for lossless/lossy traffic + + Args: + pgProfile (pytest parameter): priority group profile + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + resetWatermark (Fxiture): reset queue watermarks + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + if dutTestParams["hwsku"] in self.SUPPORTED_PGSHARED_WATERMARK_SKUS: + pytest.skip("PG shared watermark test not supported") + + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"] + + if "wm_pg_shared_lossless" in pgProfile: + pktsNumFillShared = qosConfig[pgProfile]["pkts_num_trig_pfc"] + elif "wm_pg_shared_lossy" in pgProfile: + pktsNumFillShared = int(qosConfig[pgProfile]["pkts_num_trig_egr_drp"]) - 1 + + testParams = { + "dscp": qosConfig[pgProfile]["dscp"], + "ecn": qosConfig[pgProfile]["ecn"], + "pg": qosConfig[pgProfile]["pg"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"], + "pkts_num_fill_min": qosConfig[pgProfile]["pkts_num_fill_min"], + "pkts_num_fill_shared": pktsNumFillShared, + "packet_size": qosConfig[pgProfile]["packet_size"], + "cell_size": qosConfig[pgProfile]["cell_size"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, 
testCase="sai_qos_tests.PGSharedWatermarkTest", testParams=testParams) + + def testQosSaiPgHeadroomWatermark(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark): + """ + Test QoS SAI PG headroom watermark test + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + resetWatermark (Fxiture): reset queue watermarks + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"][portSpeedCableLength] + + testParams = { + "dscp": qosConfig["wm_pg_headroom"]["dscp"], + "ecn": qosConfig["wm_pg_headroom"]["ecn"], + "pg": qosConfig["wm_pg_headroom"]["pg"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": qosConfig["pkts_num_leak_out"], + "pkts_num_trig_pfc": qosConfig["wm_pg_headroom"]["pkts_num_trig_pfc"], + "pkts_num_trig_ingr_drp": qosConfig["wm_pg_headroom"]["pkts_num_trig_ingr_drp"], + "cell_size": qosConfig["wm_pg_headroom"]["cell_size"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.PGHeadroomWatermarkTest", testParams=testParams) + + @pytest.mark.parametrize("queueProfile", ["wm_q_shared_lossless", "wm_q_shared_lossy"]) + def testQosSaiQSharedWatermark(self, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark): + """ + Test QoS SAI Queue shared watermark test for lossless/lossy traffic + + Args: + queueProfile (pytest parameter): queue profile + ptfhost (AnsibleHost): Packet Test Framework (PTF) + 
dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + resetWatermark (Fxiture): reset queue watermarks + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"][portSpeedCableLength] if "wm_q_shared_lossless" in queueProfile \ + else dutQosConfig["param"] + triggerDrop = qosConfig[queueProfile]["pkts_num_trig_ingr_drp"] if "wm_q_shared_lossless" in queueProfile \ + else qosConfig[queueProfile]["pkts_num_trig_egr_drp"] + + testParams = { + "dscp": qosConfig[queueProfile]["dscp"], + "ecn": qosConfig[queueProfile]["ecn"], + "queue": qosConfig[queueProfile]["queue"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "pkts_num_leak_out": dutQosConfig["param"][portSpeedCableLength]["pkts_num_leak_out"], + "pkts_num_fill_min": qosConfig[queueProfile]["pkts_num_fill_min"], + "pkts_num_trig_drp": triggerDrop, + "cell_size": qosConfig[queueProfile]["cell_size"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.QSharedWatermarkTest", testParams=testParams) + + def testQosSaiDscpToPgMapping(self, request, ptfhost, dutTestParams, dutConfig): + """ + Test QoS SAI DSCP to PG mapping ptf test + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + disableTest = 
request.config.getoption("--disable_test") + if disableTest: + pytest.skip("DSCP to PG mapping test disabled") + + testParams = { + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.DscpToPgMapping", testParams=testParams) + + def testQosSaiDwrrWeightChange(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, updateSchedProfile): + """ + Test QoS SAI DWRR runtime weight change + + Args: + ptfhost (AnsibleHost): Packet Test Framework (PTF) + dutTestParams (Fixture, dict): DUT host test params + dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, + and test ports + dutQosConfig (Fixture, dict): Map containing DUT host QoS configuration + updateSchedProfile (Fxiture): Updates lossless/lossy scheduler profiles + + Returns: + None + + Raises: + RunAnsibleModuleFail if ptf test fails + """ + portSpeedCableLength = dutQosConfig["portSpeedCableLength"] + qosConfig = dutQosConfig["param"] + + testParams = { + "ecn": qosConfig["wrr_chg"]["ecn"], + "dst_port_id": dutConfig["testPorts"]["dst_port_id"], + "dst_port_ip": dutConfig["testPorts"]["dst_port_ip"], + "src_port_id": dutConfig["testPorts"]["src_port_id"], + "src_port_ip": dutConfig["testPorts"]["src_port_ip"], + "q0_num_of_pkts": qosConfig["wrr_chg"]["q0_num_of_pkts"], + "q1_num_of_pkts": qosConfig["wrr_chg"]["q1_num_of_pkts"], + "q2_num_of_pkts": qosConfig["wrr_chg"]["q2_num_of_pkts"], + "q3_num_of_pkts": qosConfig["wrr_chg"]["q3_num_of_pkts"], + "q4_num_of_pkts": qosConfig["wrr_chg"]["q4_num_of_pkts"], + "q5_num_of_pkts": qosConfig["wrr_chg"]["q5_num_of_pkts"], + "q6_num_of_pkts": qosConfig["wrr_chg"]["q6_num_of_pkts"], + "limit": qosConfig["wrr_chg"]["limit"], + "pkts_num_leak_out": 
qosConfig[portSpeedCableLength]["pkts_num_leak_out"], + } + testParams.update(dutTestParams["basicParams"]) + self.runPtfTest(ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams) diff --git a/ansible/roles/test/files/saitests/copp_tests.py b/tests/saitests/copp_tests.py similarity index 100% rename from ansible/roles/test/files/saitests/copp_tests.py rename to tests/saitests/copp_tests.py diff --git a/ansible/roles/test/files/saitests/ecmp_test.py b/tests/saitests/ecmp_test.py similarity index 100% rename from ansible/roles/test/files/saitests/ecmp_test.py rename to tests/saitests/ecmp_test.py diff --git a/ansible/roles/test/files/saitests/pfc_asym.py b/tests/saitests/pfc_asym.py similarity index 100% rename from ansible/roles/test/files/saitests/pfc_asym.py rename to tests/saitests/pfc_asym.py diff --git a/ansible/roles/test/files/saitests/sai_base_test.py b/tests/saitests/sai_base_test.py similarity index 100% rename from ansible/roles/test/files/saitests/sai_base_test.py rename to tests/saitests/sai_base_test.py diff --git a/ansible/roles/test/files/saitests/sai_qos_tests.py b/tests/saitests/sai_qos_tests.py similarity index 100% rename from ansible/roles/test/files/saitests/sai_qos_tests.py rename to tests/saitests/sai_qos_tests.py diff --git a/ansible/roles/test/files/saitests/switch.py b/tests/saitests/switch.py similarity index 100% rename from ansible/roles/test/files/saitests/switch.py rename to tests/saitests/switch.py