diff --git a/spytest/Doc/install.md b/spytest/Doc/install.md index b77dd79f223..18658a17ecd 100755 --- a/spytest/Doc/install.md +++ b/spytest/Doc/install.md @@ -52,5 +52,5 @@ * The exact file used for validating SPyTest is: [IxNetwork 8.42](http://downloads.ixiacom.com/support/downloads_and_updates/public/ixnetwork/IxNetwork8.42EA.exe) * The IxNetwork Server IP address needs to be given in testbed file as "ix_server" * The IxNetwork API Server needs to be launched before launching SPyTest - * For Scapy traffic generator refer to [README.testbed.Setup.md](/docs/testbed/README.testbed.Setup.md) + * For Scapy traffic generator refer to [README.testbed.Setup.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/doc/README.testbed.Setup.md) diff --git a/spytest/Doc/intro.md b/spytest/Doc/intro.md index 6ce39d23a70..3181f557109 100755 --- a/spytest/Doc/intro.md +++ b/spytest/Doc/intro.md @@ -297,7 +297,7 @@ The SPyTest supports executing tests in standalone environment and PTF environme #### PTF Mode -Refer to [README.testbed.md](/docs/ansible/README.testbed.md) for setting up PTF-32 or PTF-64 topology. +Refer to [README.testbed.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/README.testbed.md) for setting up PTF-32 or PTF-64 topology. #### Standalone Mode @@ -306,7 +306,7 @@ In standalone mode, the DUTs can be connected to each other and TGen. Environment - PTF Mode ============================== -Refer to [README.testbed.Overview.md](/docs/testbed/README.testbed.Overview.md) for setting up PTF environment details. +Refer to [README.testbed.Overview.md](https://github.com/Azure/sonic-mgmt/blob/master/ansible/doc/README.testbed.Overview.md) for setting up PTF environment details. 
Environment - Standalone Mode ============================== diff --git a/spytest/apis/common/asic.py b/spytest/apis/common/asic.py index 89991db1863..9ffed5c254a 100644 --- a/spytest/apis/common/asic.py +++ b/spytest/apis/common/asic.py @@ -1,9 +1,10 @@ -from spytest import st +import re +from spytest import st, cutils -def bcm_show(dut, cmd, skip_tmpl=True): +def bcm_show(dut, cmd, skip_tmpl=True, max_time=0): if not st.is_feature_supported("bcmcmd", dut): return "" - return st.show(dut, cmd, skip_tmpl=skip_tmpl) + return st.show(dut, cmd, skip_tmpl=skip_tmpl, max_time=max_time) def bcm_config(dut, cmd, skip_error_check=True): if not st.is_feature_supported("bcmcmd", dut): return "" @@ -39,15 +40,79 @@ def dump_ports_info(dut): def dump_trunk(dut): bcm_config(dut, 'bcmcmd "trunk show"') -def dump_counters(dut): - bcm_show(dut, 'bcmcmd "show c"', skip_tmpl=False) +def dump_l3_defip(dut): + bcm_show(dut, "bcmcmd 'l3 defip show'") + +def dump_l3_ip6route(dut): + bcm_show(dut, "bcmcmd 'l3 ip6route show'") + +def dump_l3_l3table(dut): + bcm_show(dut, "bcmcmd 'l3 l3table show'") + +def dump_l3_ip6host(dut): + bcm_show(dut, "bcmcmd 'l3 ip6host show'") + +def dump_counters(dut, interface=None): + if not interface: + command = 'bcmcmd "show c"' + else: + command = 'bcmcmd "show c {}"'.format(interface) + bcm_show(dut, command, skip_tmpl=True) def clear_counters(dut): bcm_config(dut, 'bcmcmd "clear c"') +def get_counters(dut, interface=None, skip_tmpl=False): + if not interface: + command = 'bcmcmd "show c"' + else: + command = 'bcmcmd "show c {}"'.format(interface) + return bcm_show(dut, command, skip_tmpl=skip_tmpl) + +def get_ipv4_route_count(dut, timeout=120): + command = 'bcmcmd "l3 defip show" | wc -l' + output = bcm_show(dut, command, skip_tmpl=True, max_time=timeout) + x = re.search(r"\d+", output) + if x: + return int(x.group()) - 5 + else: + return -1 + +def get_ipv6_route_count(dut, timeout=120): + command = 'sudo bcmcmd "l3 ip6route show" | wc -l' + output = 
st.show(dut, command, skip_tmpl=True, max_time=timeout) + x = re.search(r"\d+", output) + if x: + return int(x.group()) - 7 + else: + return -1 + def bcmcmd_show_ps(dut): return bcm_show(dut, 'bcmcmd "ps"') +def get_pmap(dut): + command = 'bcmcmd "show pmap"' + return bcm_show(dut, command) + +def exec_search(dut,command,param_list,match_dict,**kwargs): + output = bcm_show(dut, 'bcmcmd "{}"'.format(command)) + if not output: + st.error("output is empty") + return False + for key in match_dict.keys(): + if not cutils.filter_and_select(output,param_list,{key:match_dict[key]}): + st.error("No match for key {} with value {}".format(key, match_dict[key])) + return False + else: + st.log("Match found for key {} with value {}".format(key, match_dict[key])) + return cutils.filter_and_select(output,param_list,{key:match_dict[key]}) + +def get_l2_out(dut, mac): + return exec_search(dut,'l2 show',["gport"],{"mac":mac}) + +def get_l3_out(dut, mac): + return exec_search(dut,'l3 egress show',["port"],{"mac":mac}) + def dump_threshold_info(dut, test, platform, mode): """ BCMCMD debug prints for Threshold feature. @@ -148,3 +213,38 @@ def dump_threshold_info(dut, test, platform, mode): for each_cmd in ['bcmcmd "{}"'.format(e) for e in cmd]: bcm_config(dut, each_cmd, skip_error_check=True) +def get_intf_pmap(dut, interface_name=None): + """ + Author: Chaitanya Vella (chaitanya.vella-kumar@broadcom.com) + This API is used to get the interface pmap details + :param dut: dut + :param interface_name: List of interface names + :return: + """ + import apis.system.interface as interface_obj + ##Passing the cli_type as click in the API call "interface_status_show" because the lanes information is available only in click CLI. + ##Please refer the JIRA: SONIC-22102 for more information. 
+ interfaces = cutils.make_list(interface_name) if interface_name else '' + if interfaces: + if any("/" in interface for interface in interfaces): + interfaces = st.get_other_names(dut, interfaces) + key = 'alias' + else: + key = 'interface' + st.debug("The interfaces list is: {}".format(interfaces)) + interface_list = interface_obj.interface_status_show(dut, interfaces=interfaces, cli_type='click') + else: + key = 'alias' if interface_obj.show_ifname_type(dut, cli_type='klish') else 'interface' + interface_list = interface_obj.interface_status_show(dut, cli_type='click') + interface_pmap = dict() + pmap_list = get_pmap(dut) + for detail in cutils.iterable(interface_list): + lane = detail["lanes"].split(",")[0] if "," in detail["lanes"] else detail["lanes"] + for pmap in pmap_list: + if pmap["physical"] == lane: + interface_pmap[detail[key]] = pmap["interface"] + return interface_pmap + +def remove_vlan_1(dut): + bcm_config(dut, 'bcmcmd "vlan remove 1 PortBitMap=all"') + diff --git a/spytest/apis/common/asic_bcm.py b/spytest/apis/common/asic_bcm.py deleted file mode 100644 index 973d32970d6..00000000000 --- a/spytest/apis/common/asic_bcm.py +++ /dev/null @@ -1,452 +0,0 @@ -# This file contains the list of API's which show and verify bcmcmd o/p. 
-# Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) -import re -from spytest import st -import utilities.common as utils - -import apis.system.interface as interface_obj - -from apis.common.asic import dump_l3_egress # pylint: disable=unused-import -from apis.common.asic import dump_l3_alpm # pylint: disable=unused-import -from apis.common.asic import dump_l2 # pylint: disable=unused-import -from apis.common.asic import dump_vlan # pylint: disable=unused-import -from apis.common.asic import dump_multicast # pylint: disable=unused-import -from apis.common.asic import dump_ipmc_table # pylint: disable=unused-import -from apis.common.asic import dump_ports_info # pylint: disable=unused-import -from apis.common.asic import dump_trunk # pylint: disable=unused-import -from apis.common.asic import dump_counters # pylint: disable=unused-import -from apis.common.asic import clear_counters # pylint: disable=unused-import -from apis.common.asic import dump_threshold_info # pylint: disable=unused-import - - -def bcmcmd_show(dut, command): - """ - This is common API for all bcmcmd show commands - Only Templates files should add for all show brcmcmd commands - Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) - - :param dut: - :param command: - :return: - - EX: - bcmcmd_show(dut,'l2 show') - bcmcmd_show(dut,'vlan show') - bcmcmd_show(dut,'trunk show') - """ - command = 'bcmcmd "{}"'.format(command) - return st.config(dut, command) - -def read_l2(dut): - """ - Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) - :param dut: - :return: mac, vlan, gport, modid, port, type - """ - return st.show(dut, 'bcmcmd "l2 show" ') - -def verify_bcmcmd_output(dut, command, **kwargs): - """ - Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) - - :param dut: - :param command: l2_show, vlan_show, pvlan_show , trunk_show - :param kwargs: based on command pass the values - :return: - """ - if command == "trunk_show": - output = st.show(dut, 'bcmcmd "trunk show" ') - else: - 
st.error("Invalid command = {}".format(command)) - return False - for each in kwargs.keys(): - if not utils.filter_and_select(output, None, {each: kwargs[each]}): - st.error("No match for {} = {} in table".format(each, kwargs[each])) - return False - return True - -def bcmcmd_show_c(dut, interface=None, skip_tmpl=False): - """ - Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) - :param dut: - :param interface: - :param skip_tmpl: - :return: - """ - if not interface: - command = 'bcmcmd "show c"' - else: - command = 'bcmcmd "show c {}"'.format(interface) - return st.show(dut, command, skip_tmpl=skip_tmpl) - -def bcmcmd_show_pmap(dut): - """ - Author: Chaitanya Vella (chaitanya.vella-kumar@broadcom.com) - This API is used to get the bcmcmd 'show pmap' output - :param dut: DUT object - :return: - """ - command = 'bcmcmd "show pmap"' - return st.show(dut, command) - - -def bcmcmd_show_ps(dut): - """ - Author: Chaitanya Vella (chaitanya.vella-kumar@broadcom.com) - This API is used to get the bcmcmd 'ps' output - :param dut: DUT object - :return: - """ - command = 'bcmcmd "ps"' - return st.show(dut, command) - - -def bcmcmd_vlan_add(dut): - """ - Author: Lavanya Harivelam (lavanya.harivelam@broadcom.com) - This API is used to get the bcmcmd 'ps' output - :param dut: DUT object - :return: - """ - command = 'bcmcmd "vlan add 1000 PortBitMap=CPU"' - return st.config(dut, command) - -def bcm_cmd_l3_intf_show(dut, **kwargs): - command = "bcmcmd 'l3 intf show'" - output = st.show(dut, command) - st.debug(output) - for each in kwargs.keys(): - if utils.filter_and_select(output, None, {each: kwargs[each]}): - st.error("No match for {} = {} in table".format(each, kwargs[each])) - return False - return True - - -def bcmcmd_route_config(dut, **kwargs): - """ - :param: dut: - :type: dut: - :param: vrf: - :type: int: - :param: ip: - :type: str: - :param: mask: - :type: str: - :param: intf: - :type: int: - :param: af: - :type: str: - :return: - :type: bool: - """ - 
st.log("Creating an {} route using bcmcmd".format(kwargs["af"])) - command = '' - if not kwargs["vrf"] and not kwargs["ip"] and not kwargs["mask"] and not kwargs["intf"] and not kwargs["action"]: - st.error("Mandatory params vrf, ip, mask, intf, action are not provided") - return False - - if kwargs["action"] == "add": - if kwargs["af"] == "ipv4": - command = "bcmcmd 'l3 defip add VRF={} IP={} Mask={} INtf={}'".format(kwargs["vrf"], kwargs["ip"], - kwargs["mask"], kwargs["intf"]) - if "ecmp" in kwargs: - command = "bcmcmd 'l3 defip add VRF={} IP={} Mask={} INtf={} ECMP={}'".format(kwargs["vrf"], - kwargs["ip"], - kwargs["mask"], - kwargs["intf"], - kwargs["ecmp"]) - elif kwargs["af"] == "ipv6": - command = "bcmcmd 'l3 ip6route add VRF={} IP={} MaskLen={} INtf={}'".format(kwargs["vrf"], kwargs["ip"], - kwargs["mask"], kwargs["intf"]) - - if kwargs["action"] == "delete": - if kwargs["af"] == "ipv4": - command = "bcmcmd 'l3 defip destroy VRF={} IP={} Mask={} INtf={}'".format(kwargs["vrf"], kwargs["ip"], - kwargs["mask"], kwargs["intf"]) - - elif kwargs["af"] == "ipv6": - command = "bcmcmd 'l3 ip6route destroy VRF={} IP={} MaskLen={} INtf={}'".format(kwargs["vrf"], kwargs["ip"], - kwargs["mask"], - kwargs["intf"]) - st.debug(command) - st.config(dut, command, skip_error_check=True) - return True - - -def bcmcmd_nbr_config(dut, **kwargs): - """ - :param: dut: - :type: dut: - :param: ip: - :type: str: - :param: intf: - :type: int: - :param: af: - :type: str: - :return: - :type: bool: - """ - st.log("Creating an {} nbr using bcmcmd".format(kwargs["af"])) - command = '' - if not kwargs["ip"] and not kwargs["intf"] and not kwargs["action"]: - st.error("Mandatory params ip, intf, action are not provided") - return False - - if kwargs["action"] == "add": - if kwargs["af"] == "ipv4": - command = "bcmcmd 'l3 l3table add IP={} INtf={}'".format(kwargs["ip"], kwargs["intf"]) - - elif kwargs["af"] == "ipv6": - command = "bcmcmd 'l3 ip6host add IP={} INtf={}'".format(kwargs["ip"], 
kwargs["intf"]) - - if kwargs["action"] == "delete": - if kwargs["af"] == "ipv4": - command = "bcmcmd 'l3 l3table destroy IP={}'".format(kwargs["ip"]) - - elif kwargs["af"] == "ipv6": - command = "bcmcmd 'l3 ip6host destroy IP={}'".format(kwargs["ip"]) - - if kwargs["action"] == "clear": - if kwargs["af"] == "ipv4": - command = "bcmcmd 'l3 l3table destroy IP={}'".format(kwargs["ip"]) - - elif kwargs["af"] == "ipv6": - command = "bcmcmd 'l3 ip6host clear'" - - st.debug(command) - st.config(dut, command, skip_error_check=True) - return True - - -def bcmcmd_l3_defip_show(dut, match={}, items=[]): - """ - :param dut: - :param match: - :param items: - :return: vrf, route, nhpmac, intf - """ - command = "bcmcmd 'l3 defip show'" - output = st.show(dut, command) - st.debug(output) - if match and items: - return utils.filter_and_select(output, items, match) - if match and not items: - return utils.filter_and_select(output, None, match) - return output - - -def bcmcmd_l3_ip6route_show(dut, match={}, items=[]): - """ - :param dut: - :param match: - :param items: - :return: vrf, route, nhpmac, intf - """ - command = "bcmcmd 'l3 ip6route show'" - output = st.show(dut, command) - st.debug(output) - if match and items: - return utils.filter_and_select(output, items, match) - if match and not items: - return utils.filter_and_select(output, None, match) - return output - - -def bcmcmd_l3_l3table_show(dut): - """ - :param dut: - :return: nbrip, egrintf - """ - command = "bcmcmd 'l3 l3table show'" - return st.show(dut, command) - - -def bcmcmd_l3_ip6host_show(dut): - """ - :param dut: - :return: nbrip, egrintf - """ - command = "bcmcmd 'l3 ip6host show'" - return st.show(dut, command) - - -def verify_bcmcmd_routing_output(dut, command, **kwargs): - """ - :param dut: - :param command: - :type :show command: "l3 defip show", "l3 ip6route show", "l3 l3table show", "l3 ip6host show" - :param :kwargs: - :type :based on arguments passed: - :return: - """ - - if command == "l3_defip_show": 
- output = bcmcmd_l3_defip_show(dut) - elif command == "l3_ip6route_show": - output = bcmcmd_l3_ip6route_show(dut) - elif command == "l3_l3table_show": - output = bcmcmd_l3_l3table_show(dut) - elif command == "l3_ip6host_show": - output = bcmcmd_l3_ip6host_show(dut) - else: - st.error("Invalid command = {}".format(command)) - return False - - for each in kwargs.keys(): - if not utils.filter_and_select(output, None, {each: kwargs[each]}): - st.error("No match for {} = {} in table".format(each, kwargs[each])) - return False - return True - - -def bcmcmd_get_l3_entry(dut): - command = "bcmcmd 'listmem l3_entry'" - output = st.show(dut, command) - st.debug(output) - return output - - -def bcmcmd_get_defip(dut, name=None): - """ - To get listmem DEFIP values. - :param dut: - :param name: - :return: - """ - command = "bcmcmd 'listmem DEFIP'" - output = st.show(dut, command) - st.debug(output) - if not name: - return output - else: - out = utils.filter_and_select(output, ['entries'], {'names': name}) - if out: - return int(out[0]['entries']) - else: - return False - - -def bcmcmd_l3_entry_only_config(dut, **kwargs): - if "action" not in kwargs: - st.error("Mandatory params num, action are not provided") - return False - valid = 1 - if kwargs["action"] == "delete": - valid = 0 - output = bcmcmd_get_l3_entry(dut) - name_list = utils.dicts_list_values(output, "names") - if "L3_ENTRY_ONLY" in name_list: - num = int(utils.filter_and_select(output, [], {"names": "L3_ENTRY_ONLY"})[0]['entries']) - 1 - command = "bcmcmd 'mod L3_ENTRY_ONLY 1 {} VALID={}'".format(num, valid) - elif "L3_ENTRY_ONLY_SINGLE" in name_list: - num = int(utils.filter_and_select(output, [], {"names": "L3_ENTRY_ONLY_SINGLE"})[0]['entries']) - 1 - command = "bcmcmd 'mod L3_ENTRY_ONLY_SINGLE 1 {} BASE_VALID={}'".format(num, valid) - elif "L3_ENTRY_SINGLE" in name_list: - num = int(utils.filter_and_select(output, [], {"names": "L3_ENTRY_SINGLE"})[0]['entries']) - 1 - command = "bcmcmd 'mod L3_ENTRY_SINGLE 1 {} 
BASE_VALID={}'".format(num, valid) - else: - st.error("L3_ENTRY_ONLY | L3_ENTRY_ONLY_SINGLE | L3_ENTRY_SINGLE not found in - listmem l3_entry") - return False - st.config(dut, command) - return True - - -def bcmcmd_l3_multipath_add(dut, **kwargs): - if "size" not in kwargs: - st.error(" Mandatory param not provided") - return False - command = "bcmcmd 'l3 multipath add size={}".format(kwargs["size"]) - if "intf0" in kwargs: - command += " intf0={}".format(kwargs["intf0"]) - if "intf1" in kwargs: - command += " intf1={}".format(kwargs["intf1"]) - command += "'" - st.debug(command) - st.config(dut, command) - return True - - -def bcmcmd_route_count_hardware(dut, timeout=120): - command = 'bcmcmd "l3 defip show" | wc -l' - output = st.show(dut, command, skip_tmpl=True, max_time=timeout) - x = re.search(r"\d+", output) - if x: - return int(x.group()) - 5 - else: - return -1 - - -def bcmcmd_ipv6_route_count_hardware(dut, timeout=120): - command = 'sudo bcmcmd "l3 ip6route show" | wc -l' - output = st.show(dut, command, skip_tmpl=True, max_time=timeout) - x = re.search(r"\d+", output) - if x: - return int(x.group()) - 7 - else: - return -1 - -def get_interface_pmap_details(dut, interface_name=None): - """ - Author: Chaitanya Vella (chaitanya.vella-kumar@broadcom.com) - This API is used to get the interface pmap details - :param dut: dut - :param interface_name: List of interface names - :return: - """ - ##Passing the cli_type as click in the API call "interface_status_show" because the lanes information is available only in click CLI. - ##Please refer the JIRA: SONIC-22102 for more information. 
- interfaces = utils.make_list(interface_name) if interface_name else '' - if interfaces: - if any("/" in interface for interface in interfaces): - interfaces = st.get_other_names(dut, interfaces) - key = 'alias' - else: - key = 'interface' - st.debug("The interfaces list is: {}".format(interfaces)) - interface_list = interface_obj.interface_status_show(dut, interfaces=interfaces, cli_type='click') - else: - key = 'alias' if interface_obj.show_ifname_type(dut, cli_type='klish') else 'interface' - interface_list = interface_obj.interface_status_show(dut, cli_type='click') - interface_pmap = dict() - pmap_list = bcmcmd_show_pmap(dut) - for detail in interface_list: - lane = detail["lanes"].split(",")[0] if "," in detail["lanes"] else detail["lanes"] - for pmap in pmap_list: - if pmap["physical"] == lane: - interface_pmap[detail[key]] = pmap["interface"] - st.debug(interface_pmap) - return interface_pmap - - -def get_param_from_bcmcmd_output(dut,command,param_list,match_dict,**kwargs): - """ - purpose: - This definition is used to get a particular field for the matched entry/row - from bcmcmd show output - Arguments: - :param dut: device to be configured - :type dut: string - :param command: command to be executed - :type command: string - :param param_list: field/parameter list to be returned back - :type param_list: list - :param match_dict: key and value to match an entry/row - :type match_dict: dictionary - :return: False/output; matched entry/row in Pass case - - usage: - get_param_from_bcmcmd_output(vars.D1,["gport"],{"mac" : "b8:6a:97:8a:8c:68"}) - get_param_from_bcmcmd_output(vars.D1,["port"],{"mac" : "b8:6a:97:8a:8c:68"}) - """ - command = 'bcmcmd "{}"'.format(command) - output = st.show(dut, command) - if not output: - st.error("output is empty") - return False - for key in match_dict.keys(): - if not utils.filter_and_select(output,param_list,{key:match_dict[key]}): - st.error("No match for key {} with value {}".format(key, match_dict[key])) - return False - 
else: - st.log("Match found for key {} with value {}".format(key, match_dict[key])) - return utils.filter_and_select(output,param_list,{key:match_dict[key]}) diff --git a/spytest/apis/common/base_config.py b/spytest/apis/common/base_config.py index 791843566e9..bd465f6612c 100644 --- a/spytest/apis/common/base_config.py +++ b/spytest/apis/common/base_config.py @@ -3,8 +3,22 @@ def init(dut): st.create_init_config_db(dut) +def remove_vlan_1(dut): + if not st.is_feature_supported("sai-removes-vlan-1", dut): + import apis.common.asic as asicapi + asicapi.remove_vlan_1(dut) + +def post_reboot(dut, is_upgrade=False): + st.banner("Remove VLAN-1 post reboot", dut=dut) + remove_vlan_1(dut) + def extend(dut): st.log("Extend base config if needed", dut=dut) - st.config(dut, "config feature state nat enabled") - st.config(dut, "config feature state sflow enabled") + if not st.is_feature_supported("nat-default-enabled", dut): + st.config(dut, "config feature state nat enabled") + if not st.is_feature_supported("sflow-default-enabled", dut): + st.config(dut, "config feature state sflow enabled") + #st.config(dut, "configure lldp status disabled", type='lldp') + st.banner("Remove VLAN-1 in base config", dut=dut) + remove_vlan_1(dut) diff --git a/spytest/apis/common/hooks.py b/spytest/apis/common/hooks.py index ed3f0b273e1..a369d33b403 100644 --- a/spytest/apis/common/hooks.py +++ b/spytest/apis/common/hooks.py @@ -67,6 +67,7 @@ def post_reboot(self, dut, is_upgrade=False): ntp.ensure_ntp_config(dut) if st.getenv("SPYTEST_GENERATE_CERTIFICATE", "0") != "0": basic.ensure_certificate(dut) + base_config.post_reboot(dut, is_upgrade=is_upgrade) def init_base_config(self, dut): base_config.init(dut) @@ -178,3 +179,7 @@ def debug_system_status(self, dut): def dut_reboot(self, dut, method='normal',cli_type=''): return reboot.dut_reboot(dut, method, cli_type) + def get_onie_grub_config(self, dut, mode): + from apis.system.boot_up import get_onie_grub_config + return 
get_onie_grub_config(dut, mode) + diff --git a/spytest/apis/qos/acl.py b/spytest/apis/qos/acl.py index 7aa6effbcca..d7f1da0e7b3 100644 --- a/spytest/apis/qos/acl.py +++ b/spytest/apis/qos/acl.py @@ -261,14 +261,14 @@ def create_acl_rule(dut, skip_verify=True, acl_type=None, host_1=None, host_2=No packet_action = action_mapping[packet_action.lower()] rule_create = json.loads(""" { - "openconfig-acl:acl-entries": { - "acl-entry": [{ - "sequence-id": 0, - "config": {"sequence-id": 0,"description": "string"}, - "actions": {"config": {"forwarding-action": "string"}}, - "transport": {"config": {}} - }] - } + "openconfig-acl:acl-entries": { + "acl-entry": [{ + "sequence-id": 0, + "config": {"sequence-id": 0,"description": "string"}, + "actions": {"config": {"forwarding-action": "string"}}, + "transport": {"config": {}} + }] + } } """) config = dict() @@ -307,16 +307,36 @@ def create_acl_rule(dut, skip_verify=True, acl_type=None, host_1=None, host_2=No if src_ip != "any": config["config"]["source-address"] = src_ip # config ports - if dst_port: + if dst_comp_operator == 'lt': + dst_port_range = "0 {}".format(dst_port) + transport_data["destination-port"] = "{}..{}".format(dst_port_range.split()[0],dst_port_range.split()[1]) + elif dst_comp_operator == 'gt': + dst_port_range = "{} 65500".format(dst_port) + transport_data["destination-port"] = "{}..{}".format(dst_port_range.split()[0],dst_port_range.split()[1]) + elif dst_port: transport_data["destination-port"] = int(dst_port) + st.log(transport_data) elif dst_port_range: transport_data["destination-port"] = "{}..{}".format(dst_port_range.split()[0],dst_port_range.split()[1]) - if src_port: + if src_comp_operator == 'lt': + src_port_range = "0 {}".format(src_port) + transport_data["source-port"] = "{}..{}".format(src_port_range.split()[0],src_port_range.split()[1]) + elif src_comp_operator == 'gt': + src_port_range = "{} 65500".format(src_port) + transport_data["source-port"] = 
"{}..{}".format(src_port_range.split()[0],src_port_range.split()[1]) + elif src_port: transport_data["source-port"] = int(src_port) + st.log(transport_data) elif src_port_range: + st.log(src_port_range) transport_data["source-port"] = "{}..{}".format(src_port_range.split()[0],src_port_range.split()[1]) + st.log(transport_data) if tcp_flag: - transport_data["tcp-flags"] = ["TCP_RST"] + transport_data["tcp-flags"] = ["TCP_{}".format(temp.upper().replace('-','_')) for temp in tcp_flag.split()] + st.log(transport_data) + if tcp_flag == "established": + transport_data["openconfig-acl-ext:tcp-session-established"] = True + transport_data.pop("tcp-flags") rule_create["openconfig-acl:acl-entries"]["acl-entry"][0]["transport"]["config"] = transport_data if acl_type == "ip": acl_type = "ipv4" @@ -844,28 +864,28 @@ def config_access_group(dut, acl_type=None, **kwargs): elif cli_type in ["rest-patch","rest-put"]: config_group = json.loads(""" { - "openconfig-acl:interface": [{ - "id": "string", - "config": { - "id": "string" - }, - "interface-ref": { - "config": { - "interface": "string" - } - } - }] + "openconfig-acl:interface": [{ + "id": "string", + "config": { + "id": "string" + }, + "interface-ref": { + "config": { + "interface": "string" + } + } + }] } """) acl_set = json.loads(""" [{ - "type": "string", - "config": { - "type": "string", - "set-name": "string" - }, - "set-name": "string" - }] + "type": "string", + "config": { + "type": "string", + "set-name": "string" + }, + "set-name": "string" + }] """) acl_set[0]["type"] = acl_set[0]["config"]["type"] = acl_type acl_set[0]["set-name"] = acl_set[0]["config"]["set-name"] = table_name diff --git a/spytest/apis/qos/copp.py b/spytest/apis/qos/copp.py index f8d5f39bd58..00b2fbc6ea3 100644 --- a/spytest/apis/qos/copp.py +++ b/spytest/apis/qos/copp.py @@ -4,10 +4,10 @@ import re from spytest import st from spytest.utils import filter_and_select -import apis.common.asic_bcm as asicapi +import apis.common.asic as asicapi +from 
apis.system.rest import get_rest import utilities.utils as utils_obj from utilities.common import do_eval -from apis.system.rest import get_rest def get_copp_config(dut, **kwargs): """ @@ -826,7 +826,7 @@ def get_show_c_cpuq_counter(dut,queue): for itercountvar in range(nooftimes): if itercountvar != 0: st.wait(5) - cli_out = asicapi.bcmcmd_show_c(dut) + cli_out = asicapi.get_counters(dut) fil_out = filter_and_select(cli_out, ["value"], {"key": queue}) if not fil_out: fil_out = filter_and_select(cli_out, ["value"], {"key": queue_mc}) @@ -1019,3 +1019,53 @@ def config_coppgroup_copptrap_viarest(dut, **kwargs): return True + +def set_copp_pir_config(dut, config, *args): + """ + To set the config into copp_config.json + Author : vishnuvardhan.talluri@broadcom.com + + :param dut: + :param config: + :param args: + :return: + """ + + command = "sudo cat /etc/sonic/copp_config.json" + output = st.show(dut, command, skip_tmpl=True) + reg_output = utils_obj.remove_last_line_from_string(output) + try: + data = eval(reg_output) + except Exception as e: + st.log(e) + reg_output = str(reg_output) + "\n" + "}" + data = eval(reg_output) + st.log("ARGS {}".format(args)) + if config == "get": + return data + + for eachli in args: + if len(eachli) != 3: + st.error("Invalid input is provided {}".format(eachli)) + return False + table = eachli[0] + attribute = eachli[1] + value = eachli[2] + found_table = False + if table in data['SCHEDULER'].keys(): + data['SCHEDULER'][table][attribute] = value + found_table = True + if not found_table: + st.error("Table not found {}".format(table)) + return False + + file_path = utils_obj.write_to_json_file(data, "/tmp/00-copp.config.json") + st.log("FILE PATH -- {}".format(file_path)) + st.upload_file_to_dut(dut, file_path, "/tmp/00-copp.config.json") + command = "sudo cp /tmp/00-copp.config.json /etc/sonic/copp_config.json" + st.config(dut, command) + command = "rm /tmp/00-copp.config.json" + st.config(dut, command) + return True + + diff --git 
a/spytest/apis/routing/arp.py b/spytest/apis/routing/arp.py index 29bb0fa6319..f36ca863c4c 100644 --- a/spytest/apis/routing/arp.py +++ b/spytest/apis/routing/arp.py @@ -133,7 +133,7 @@ def add_static_arp(dut, ipaddress, macaddress, interface="", cli_type=""): if interface: intf = get_interface_number_from_name(interface) command = "interface {} {}".format(intf['type'], intf['number']) - command = command + "\n" + "ip arp {} {}".format(ipaddress, macaddress) + command = command + "\n" + "ip arp {} {}".format(ipaddress, macaddress) + "\n" + "exit" else: st.error("'interface' option is mandatory for adding static arp entry in KLISH") return False @@ -156,7 +156,7 @@ def add_static_arp(dut, ipaddress, macaddress, interface="", cli_type=""): return True -def delete_static_arp(dut, ipaddress, interface="", mac="", cli_type=""): +def delete_static_arp(dut, ipaddress, interface="", mac="", cli_type="",vrf=""): """ To delete static arp Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) @@ -176,7 +176,7 @@ def delete_static_arp(dut, ipaddress, interface="", mac="", cli_type=""): if mac: macaddress = mac else: - output = show_arp(dut, ipaddress=ipaddress, interface=interface) + output = show_arp(dut, ipaddress=ipaddress, interface=interface,vrf=vrf) if len(output) == 0: st.error("Did not find static arp entry with IP : {} and Interface : {}".format(ipaddress, interface)) return False @@ -184,7 +184,7 @@ def delete_static_arp(dut, ipaddress, interface="", mac="", cli_type=""): macaddress = output[0]["macaddress"] intf = get_interface_number_from_name(interface) command = "interface {} {}".format(intf['type'], intf['number']) - command = command + "\n" + "no ip arp {} {}".format(ipaddress, macaddress) + command = command + "\n" + "no ip arp {} {}".format(ipaddress, macaddress) + "\n" + "exit" else: st.error("'interface' option is mandatory for deleting static arp entry in KLISH") return False @@ -652,7 +652,16 @@ def _get_rest_neighbor_entries(dut, interface, is_arp=True): 
non_physical_ports = ['vlan'] rest_urls = st.get_datastore(dut, 'rest_urls') intf_index = get_subinterface_index(dut, interface) - url = rest_urls['get_arp_per_port'].format(name=interface, index=intf_index) if is_arp else rest_urls['get_ndp_per_port'].format(name=interface, index=intf_index) + if is_arp: + if any(port in interface.lower() for port in non_physical_ports): + url = rest_urls['get_arp_per_vlan_port'].format(name=interface) + else: + url = rest_urls['get_arp_per_port'].format(name=interface, index=intf_index) + else: + if any(port in interface.lower() for port in non_physical_ports): + url = rest_urls['get_ndp_per_vlan_port'].format(name=interface) + else: + url = rest_urls['get_ndp_per_port'].format(name=interface, index=intf_index) intf = 'iface' if is_arp else 'interface' out = get_rest(dut, rest_url=url) arp_entries = out['output']['openconfig-if-ip:neighbors']['neighbor'] if isinstance(out, dict) and out.get('output') and 'openconfig-if-ip:neighbors' in out['output'] and out['output']['openconfig-if-ip:neighbors'].get('neighbor') and isinstance(out['output']['openconfig-if-ip:neighbors']['neighbor'], list) else '' diff --git a/spytest/apis/routing/bfd.py b/spytest/apis/routing/bfd.py index ff8fff769da..812c8d25e83 100644 --- a/spytest/apis/routing/bfd.py +++ b/spytest/apis/routing/bfd.py @@ -2,12 +2,12 @@ import datetime from spytest import st -from apis.common.asic_bcm import bcmcmd_l3_ip6route_show, bcmcmd_l3_defip_show +import apis.common.asic as asicapi from apis.system.interface import clear_interface_counters, show_interface_counters_all from apis.routing.arp import show_arp, show_ndp from apis.system.rest import config_rest, get_rest, delete_rest - -from utilities.common import filter_and_select, make_list, exec_all +from apis.routing import ip as ip_api +from utilities.common import filter_and_select, make_list, exec_all, is_valid_ipv4 def verify_bfd_counters(dut,**kwargs): """ @@ -471,6 +471,9 @@ def verify_bfd_peer(dut,**kwargs): else: 
is_mhop = False + ping_verify = True if 'ping_verify' in kwargs else False + kwargs.pop('ping_verify', '') + rv = False #Converting all kwargs to list type to handle single or multiple peers for key in kwargs: @@ -548,7 +551,8 @@ def verify_bfd_peer(dut,**kwargs): try: parsed_output = st.show(dut, cmd, type=cli_type) except Exception as e: - st.error("The BFD session is not existing either deleted or not configured: exception is {} ".format(e)) + st.error("The BFD session does not exist, either deleted or not configured: exception is {} ".format(e)) + if ping_verify: debug_bfd_ping(dut, kwargs['peer'], vrf_name=vrf, cli_type=cli_type) return False elif cli_type in ['rest-patch', 'rest-put']: # st.show(dut, cmd, type='klish') @@ -578,6 +582,7 @@ def verify_bfd_peer(dut,**kwargs): if len(parsed_output) == 0: st.error("OUTPUT is Empty") + if ping_verify: debug_bfd_ping(dut, kwargs['peer'], vrf_name=vrf, cli_type=cli_type) return False #Get the index of peer from list of parsed output for i in range(len(kwargs['peer'])): @@ -594,9 +599,11 @@ def verify_bfd_peer(dut,**kwargs): rv=True else: st.error('Match Not Found for %s :: Expected: %s Actual : %s'%(k,kwargs[k][i],parsed_output[peer_index][k])) + if ping_verify: debug_bfd_ping(dut, kwargs['peer'][i], vrf_name=vrf, enable_debug=False, cli_type=cli_type) return False else: st.error(" BFD Peer %s not in output"%kwargs['peer'][i]) + if ping_verify: debug_bfd_ping(dut, kwargs['peer'], vrf_name=vrf, cli_type=cli_type) return False return rv @@ -815,14 +822,26 @@ def get_bfd_peers_brief(dut, cli_type=''): return False +def debug_bfd_ping(dut, addresses, vrf_name='default', enable_debug=True, cli_type=''): + st.banner("********* Ping Debug commands start ************") + for addr in make_list(addresses): + family = 'ipv4' if is_valid_ipv4(addr) else 'ipv6' + if vrf_name in ['default', 'default-vrf']: + ip_api.ping(dut, addr, family, cli_type=cli_type) + else: + ip_api.ping(dut, addr, family, interface=vrf_name, cli_type=cli_type) + if 
enable_debug: debug_bgp_bfd(dut) + + def debug_bgp_bfd(dut): dut_list = make_list(dut) - st.log("********* Dubug commands starts ************") - func_list = [clear_interface_counters, show_arp, show_ndp, show_interface_counters_all, bcmcmd_l3_ip6route_show, bcmcmd_l3_defip_show] + st.banner("********* Dubug commands starts ************") + func_list = [clear_interface_counters, show_arp, show_ndp, show_interface_counters_all, + asicapi.dump_l3_ip6route, asicapi.dump_l3_defip] for func in func_list: api_list = [[func, dut] for dut in dut_list] exec_all(True, api_list) - st.log(" ******** End of Dubug commands ************") + st.banner(" ******** End of Dubug commands ************") def rest_get_bfd_peer_info(dut, type, bfd_type=None, peer_list=True, **kwargs): diff --git a/spytest/apis/routing/bgp.py b/spytest/apis/routing/bgp.py index 967e112086f..abb528870b5 100644 --- a/spytest/apis/routing/bgp.py +++ b/spytest/apis/routing/bgp.py @@ -6,9 +6,9 @@ from spytest import st, putils import apis.system.reboot as reboot -from apis.system.rest import config_rest, delete_rest +from apis.system.rest import config_rest, delete_rest, get_rest , rest_status -from utilities.utils import fail_on_error, get_interface_number_from_name, is_valid_ip_address +from utilities.utils import fail_on_error, get_interface_number_from_name, is_valid_ip_address, convert_microsecs_to_time from utilities.common import filter_and_select def get_forced_cli_type(cmd_type): @@ -24,6 +24,8 @@ def get_cfg_cli_type(dut, **kwargs): cli_type = cli_type or st.get_ui_type(dut, **kwargs) if cli_type in ["click", "vtysh"]: cli_type = "vtysh" + elif cli_type in ["rest-patch","rest-put"]: + cli_type = "rest-patch" else: cli_type = "klish" return cli_type @@ -33,6 +35,8 @@ def get_show_cli_type(dut, **kwargs): cli_type = cli_type or st.get_ui_type(dut, **kwargs) if cli_type in ["click", "vtysh"]: cli_type = "vtysh" + elif cli_type in ["rest-patch","rest-put"]: + cli_type = "rest-patch" else: cli_type = 
"klish" return cli_type @@ -64,6 +68,7 @@ def enable_router_bgp_mode(dut, **kwargs): :return: """ cli_type = get_cfg_cli_type(dut, **kwargs) + st.log("Enabling router BGP mode ..") if cli_type in ['vtysh', 'click']: cli_type ='vtysh' st.log("Enabling router BGP mode ..") @@ -84,6 +89,19 @@ def enable_router_bgp_mode(dut, **kwargs): command += ' vrf ' + kwargs['vrf_name'] if 'router_id' in kwargs: command += ' router-id {}'.format(kwargs['router_id']) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf_name = kwargs['vrf_name'] if 'vrf_name' in kwargs and kwargs['vrf_name'] != 'default-vrf' else "default" + url = rest_urls['bgp_global_config'].format(vrf_name) + json_data = dict() + if 'local_asn' in kwargs: + json_data["openconfig-network-instance:as"] = int(kwargs['local_asn']) + if 'router_id' in kwargs: + json_data["openconfig-network-instance:router-id"] = kwargs['router_id'] + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=json_data): + st.error("Error in configuring AS number / router ID") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -119,6 +137,24 @@ def config_router_bgp_mode(dut, local_asn, config_mode='enable', vrf='default', command = "router bgp {} vrf {}".format(local_asn, vrf) else: command = "no router bgp vrf {}".format(vrf) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf_name = "default" if vrf.lower() == 'default' else vrf.lower() + if not mode: + url = rest_urls['bgp_global_config'].format(vrf_name) + json_data = dict() + json_data["openconfig-network-instance:config"] = dict() + json_data["openconfig-network-instance:config"].update({'as': int(local_asn)}) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=json_data): + st.error("Error in configuring AS number") + return False + return True + else: + url = 
rest_urls['bgp_as_config'].format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring AS number") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -143,6 +179,14 @@ def unconfig_router_bgp(dut, **kwargs): command = "no router bgp vrf {}".format(kwargs.get("vrf_name")) else: command = "no router bgp" + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf_name = kwargs['vrf_name'] if kwargs.get('vrf_name') else "default" + url = rest_urls['bgp_as_config'].format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring router BGP") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -163,6 +207,13 @@ def cleanup_router_bgp(dut_list, cli_type="", skip_error_check=True): st.log("Cleanup BGP mode ..") command = "no router bgp" st.config(dut, command, type=cli_type, skip_error_check=skip_error_check) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + url = rest_urls['bgp_config'].format("default") + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring router BGP") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -177,36 +228,46 @@ def _cleanup_bgp_config(dut_list, cli_type=""): """ dut_li = list(dut_list) if isinstance(dut_list, list) else [dut_list] for dut in dut_li: - command = "show running bgp" - output = st.show(dut, command, type="vtysh", skip_error_check=True) - st.log("Cleanup BGP configuration on %s.." 
% dut) - config = output.splitlines() - line = 0 - count = len(config) - bgp_inst = [] - cli_type = get_cfg_cli_type(dut, cli_type=cli_type) - while line < count: - _str = config[line] - if re.match(r'router bgp .*', _str, re.IGNORECASE): - if cli_type =="klish": - _newstr =' '.join([i for i in _str.split(" ") if not i.isdigit()]) - if "vrf" in _str: - bgp_inst.insert(0, _newstr) - else: - bgp_inst.append(_newstr) - else: - if "vrf" in _str: - bgp_inst.insert(0, _str) + if cli_type in ["click", "klish", "vtysh"]: + command = "show running bgp" + output = st.show(dut, command, type="vtysh", skip_error_check=True) + st.log("Cleanup BGP configuration on %s.." % dut) + config = output.splitlines() + line = 0 + count = len(config) + bgp_inst = [] + cli_type = get_cfg_cli_type(dut, cli_type=cli_type) + while line < count: + _str = config[line] + if re.match(r'router bgp .*', _str, re.IGNORECASE): + if cli_type =="klish": + _newstr =' '.join([i for i in _str.split(" ") if not i.isdigit()]) + if "vrf" in _str: + bgp_inst.insert(0, _newstr) + else: + bgp_inst.append(_newstr) else: - bgp_inst.append(_str) - - while config[line] != "!": - line += 1 - line += 1 - - for inst in bgp_inst: - st.config(dut, "no {}".format(inst), type=cli_type) - + if "vrf" in _str: + bgp_inst.insert(0, _str) + else: + bgp_inst.append(_str) + + while config[line] != "!": + line += 1 + line += 1 + + for inst in bgp_inst: + st.config(dut, "no {}".format(inst), type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + url = rest_urls['bgp_config'].format("default") + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring router BGP") + return False + return True + else: + st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) + return False return True @@ -256,6 +317,35 @@ def config_bgp_router(dut, local_asn, router_id='', keep_alive=60, hold=180, con if config == 'no' and router_id: command += "no router-id \n" command += 
"exit" + elif cli_type in ["rest-patch", "rest-put"]: + + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf_name = kwargs['vrf_name'] if 'vrf_name' in kwargs and kwargs['vrf_name'] != 'default-vrf' else "default" + url = rest_urls['bgp_global_config'].format(vrf_name) + json_data = dict() + json_data["openconfig-network-instance:config"] = dict() + json_data["openconfig-network-instance:config"].update({'as': int(local_asn)}) + if config == 'yes': + if router_id: + json_data["openconfig-network-instance:config"].update({'router-id': router_id}) + if keep_alive: + json_data["openconfig-network-instance:config"].update({'openconfig-bgp-ext:keepalive-interval': str(keep_alive)}) + if hold: + json_data["openconfig-network-instance:config"].update({'openconfig-bgp-ext:hold-time': str(hold)}) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=json_data): + st.error("Error in configuring router BGP") + return False + + if config == 'no' and keep_alive: + url = rest_urls['bgp_keepalive_config'].format("default") + delete_rest(dut, rest_url=url) + if config == 'no' and hold: + url = rest_urls['bgp_holdtime_config'].format("default") + delete_rest(dut, rest_url=url) + if config == 'no' and router_id: + url = rest_urls['bgp_routerid_config'].format("default") + delete_rest(dut, rest_url=url) + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -291,6 +381,23 @@ def create_bgp_router(dut, local_asn, router_id='', keep_alive=60, hold=180, cli command.append("router-id {}".format(router_id)) command.append("timers {} {}".format(keep_alive, hold)) command.append("exit") + elif cli_type in ["rest-patch", "rest-put"]: + + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf_name = "default" + url = rest_urls['bgp_global_config'].format(vrf_name) + json_data = dict() + json_data["openconfig-network-instance:as"] = int(local_asn) + if router_id: + json_data["openconfig-network-instance:router-id"] = router_id + if 
keep_alive: + json_data["openconfig-bgp-ext:keepalive-interval"] = str(keep_alive) + if hold: + json_data["openconfig-bgp-ext:hold-time"] = str(hold) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=json_data): + st.error("Error in configuring router BGP") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -349,6 +456,57 @@ def create_bgp_neighbor(dut, local_asn, neighbor_ip, remote_asn, keep_alive=60, commands.append("exit") commands.append("exit") st.config(dut, commands, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + url = rest_urls['bgp_config'].format(vrf) + data=dict() + data["openconfig-network-instance:bgp"]=dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"].update({"as":int(local_asn)}) + neigh_data = dict() + neigh_data["openconfig-network-instance:neighbors"] = dict() + neigh_data["openconfig-network-instance:neighbors"]["neighbor"] = list() + neighbors = dict() + neighbors["neighbor-address"] = neighbor_ip + neighbors["config"] = dict() + neighbors["config"]["neighbor-address"] = neighbor_ip + if str(remote_asn).isdigit(): + neighbors["config"]["peer-as"] = int(remote_asn) + else: + if remote_asn == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + neighbors["config"]["peer-type"]=peer_type + + neighbors["timers"] = dict() + neighbors["timers"]["config"] = dict() + neighbors["timers"]["config"]["hold-time"] = str(hold) + neighbors["timers"]["config"]["keepalive-interval"] = str(keep_alive) + neighbors["afi-safis"] = dict() + if password: + neighbors["openconfig-bgp-ext:auth-password"] = dict() + neighbors["openconfig-bgp-ext:auth-password"]["config"] = dict() + neighbors["openconfig-bgp-ext:auth-password"]["config"]["password"] = password + 
if family: + if family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + neighbors["afi-safis"]["afi-safi"] = list() + afi_safi_data = dict() + afi_safi_data["afi-safi-name"] = afi_safi_name + afi_safi_data["config"] = dict() + afi_safi_data["config"]["afi-safi-name"] = afi_safi_name + afi_safi_data["config"]["enabled"] = True + neighbors["afi-safis"]["afi-safi"].append(afi_safi_data) + neigh_data["openconfig-network-instance:neighbors"]["neighbor"].append(neighbors) + data["openconfig-network-instance:bgp"].update(neigh_data) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=data): + st.error("Error in configuring router neighbor") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -401,6 +559,63 @@ def config_bgp_neighbor(dut, local_asn, neighbor_ip, remote_asn, family="ipv4", commands.append("exit") #exit router-bgp st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) return True + elif cli_type in ["rest-patch", "rest-put"]: + + rest_urls = st.get_datastore(dut, 'rest_urls') + data = dict() + data["openconfig-network-instance:bgp"] = dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"].update({"as": int(local_asn)}) + if family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + if config == "yes": + url = rest_urls['bgp_neighbor_config'].format(vrf) + neigh_data = dict() + neigh_data["openconfig-network-instance:neighbors"] = dict() + neigh_data["openconfig-network-instance:neighbors"]["neighbor"] = list() + neighbors = dict() + neighbors["neighbor-address"] = neighbor_ip + neighbors["config"] = dict() + neighbors["config"]["neighbor-address"] = neighbor_ip + + if 
str(remote_asn).isdigit(): + neighbors["config"]["peer-as"] = int(remote_asn) + else: + if remote_asn == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + neighbors["config"]["peer-type"] = peer_type + + neighbors["timers"] = dict() + neighbors["timers"]["config"] = dict() + neighbors["timers"]["config"]["hold-time"] = str(hold) + neighbors["timers"]["config"]["keepalive-interval"] = str(keep_alive) + neighbors["timers"]["config"]["connect-retry"] = str(connect_retry) + neighbors["afi-safis"] = dict() + neighbors["afi-safis"]["afi-safi"] = list() + afi_safi_data = dict() + afi_safi_data["afi-safi-name"] = afi_safi_name + afi_safi_data["config"] = dict() + afi_safi_data["config"]["afi-safi-name"] = afi_safi_name + afi_safi_data["config"]["enabled"] = True + neighbors["afi-safis"]["afi-safi"].append(afi_safi_data) + neigh_data["openconfig-network-instance:neighbors"]["neighbor"].append(neighbors) + data["openconfig-network-instance:bgp"].update(neigh_data) + url = rest_urls['bgp_config'].format(vrf) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=data): + st.error("Error in configuring router neighbor") + return False + return True + else: + url = rest_urls['bgp_neighbor_config'].format(vrf) + if not delete_rest(dut, rest_url=url): + st.error("Unconfiguring neighbor failed") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -421,6 +636,7 @@ def config_bgp_neighbor_properties(dut, local_asn, neighbor_ip, family=None, mod properties = kwargs peergroup = properties.get('peergroup', None) cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-patch","rest-put"] else cli_type skip_error_check = kwargs.get("skip_error_check", True) # Add validation for IPV4 / IPV6 address config_router_bgp_mode(dut, local_asn, cli_type=cli_type) @@ -508,6 +724,20 @@ def delete_bgp_neighbor(dut, local_asn, neighbor_ip, remote_asn, vrf='default', 
commands.append("no neighbor {}".format(neighbor_ip)) commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) + elif cli_type in ["rest-patch", "rest-put"]: + result = True + rest_urls = st.get_datastore(dut, 'rest_urls') + url = rest_urls['bgp_peer_as_config'].format(vrf, neighbor_ip) + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring AS number") + result = False + url = rest_urls['bgp_del_neighbor_config'].format(vrf, neighbor_ip) + if not delete_rest(dut, rest_url=url): + st.error("Error in Unconfiguring neighbor number") + result = False + if not result: + return False + return True else: st.error("UNSUPPORTE CLI TYPE -- {}".format(cli_type)) return False @@ -547,6 +777,23 @@ def change_bgp_neighbor_admin_status(dut, local_asn, neighbor_ip, operation=1, c st.error("Invalid operation provided.") return False st.config(dut, command, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_neighbor_config'].format(vrf, neighbor_ip) + neigh_data = dict() + neigh_data["openconfig-network-instance:neighbors"] = dict() + neigh_data["openconfig-network-instance:neighbors"]["neighbor"] = list() + neighbors = dict() + neighbors["neighbor-address"] = neighbor_ip + neighbors["config"] = dict() + neighbors["config"]["neighbor-address"] = neighbor_ip + neighbors["config"]["enabled"] = True if operation == 0 else False + neigh_data["openconfig-network-instance:neighbors"]["neighbor"].append(neighbors) + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=neigh_data): + st.error("Error in configuring router neighbor") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -562,6 +809,7 @@ def advertise_bgp_network(dut, local_asn, network, route_map='', config='yes', f :return: """ cli_type = get_cfg_cli_type(dut, cli_type=cli_type) + cli_type = 
"klish" if cli_type in ["rest-patch","rest-put"] else cli_type st.log("Advertise BGP network ..") # Add validation for IPV4 / IPV6 address config_router_bgp_mode(dut, local_asn, cli_type=cli_type) @@ -601,6 +849,7 @@ def config_bgp_network_advertise(dut, local_asn, network, route_map='', addr_fam skip_error_check=True, network_import_check=False): cli_type = get_cfg_cli_type(dut, cli_type=cli_type) + cli_type = "klish" if cli_type in ["rest-patch", "rest-put"] else cli_type cfgmode = 'no' if config != 'yes' else '' if cli_type == "vtysh": command = "router bgp {}".format(local_asn) @@ -633,18 +882,29 @@ def show_bgp_ipv4_summary_vtysh(dut, vrf='default', **kwargs): :return: """ cli_type = get_show_cli_type(dut, **kwargs) + skip_tmpl = kwargs.get("skip_tmpl", False) if cli_type == "vtysh": if vrf == 'default': command = "show ip bgp summary" else: command = "show ip bgp vrf {} summary".format(vrf) - return st.show(dut, command, type='vtysh') + return st.show(dut, command, type='vtysh', skip_tmpl=skip_tmpl) elif cli_type == "klish": if vrf == 'default': command = "show bgp ipv4 unicast summary" else: command = "show bgp ipv4 unicast vrf {} summary".format(vrf) - return st.show(dut, command, type=cli_type) + return st.show(dut, command, type=cli_type, skip_tmpl=skip_tmpl) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + + url = rest_urls['bgp_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return parse_bgp_summary_output(output) + else: + return [] else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return [] @@ -668,10 +928,72 @@ def show_bgp_ipv6_summary_vtysh(dut, vrf='default', **kwargs): else: command = "show bgp ipv6 unicast vrf {} summary".format(vrf) return st.show(dut, command, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + + url = 
rest_urls['bgp_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return parse_bgp_summary_output(output) + else: + return [] + #st.log(output) + #return parse_bgp_summary_output(output) else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return [] +def parse_bgp_summary_output(output): + st.log("parse output ") + response = list() + temp_data = list() + if not output: + return response + bgp_data = output["openconfig-network-instance:bgp"] + established_cnt = 0 + empty_values = ['peers', 'ribentries', 'peersmemoryinkbytes', 'dynnbr', 'ribmemoryinbytes', 'dynlimit', 'tblver'] + if "neighbors" in bgp_data: + if "neighbor" in bgp_data["neighbors"]: + neighbors = bgp_data["neighbors"]["neighbor"] + for neighbor in neighbors: + show_output = dict() + show_output["localasnnumber"] = neighbor["config"]["local-as"] if "local-as" in neighbor["config"] else (neighbor["state"]["local-as"] if "local-as" in neighbor["state"] else "") + if "queues" in neighbor["state"]: + show_output["outq"] = neighbor["state"]["queues"]["output"] + show_output["inq"] = neighbor["state"]["queues"]["input"] + else: + show_output["outq"] = show_output["inq"] = 0 + sent_msg_cnt = rcv_msg_cnt = 0 + if "messages" in neighbor["state"]: + sent_msgs = neighbor["state"]["messages"]["sent"] + rcv_msgs = neighbor["state"]["messages"]["received"] + else: + sent_msgs = rcv_msgs = dict() + for _, value in sent_msgs.items(): + sent_msg_cnt = sent_msg_cnt + int(value) + show_output["msgsent"] = sent_msg_cnt + #rcv_msgs = neighbor["state"]["messages"]["received"] + for key, value in rcv_msgs.items(): + rcv_msg_cnt = rcv_msg_cnt + int(value) + show_output["msgrcvd"] = rcv_msg_cnt + show_output["updown"] = convert_microsecs_to_time( + neighbor["state"]["last-established"]) if "last-established" in neighbor["state"] else "never" + show_output["routerid"] = bgp_data["global"]["config"]["router-id"] if "router-id" in 
bgp_data["global"]["config"] else (bgp_data["global"]["state"]["router-id"] if "router-id" in bgp_data["global"]["state"] else "") + show_output["state"] = neighbor["state"]["session-state"] if "session-state" in neighbor["state"] else "" + show_output["version"] = 4 + show_output["vrfid"] = "default" + show_output["neighbor"] = neighbor["neighbor-address"] + show_output["asn"] = neighbor["config"]["peer-as"] if "peer-as" in neighbor["config"] else (neighbor["state"]["peer-as"] if "peer-as" in neighbor["state"] else "") + if show_output["state"] == "ESTABLISHED": + established_cnt = established_cnt + 1 + for keys in empty_values: + show_output[keys] = "" + temp_data.append(show_output) + for data in temp_data: + data.update({"estd_nbr": established_cnt, "total_nbr": len(temp_data)}) + response.append(data) + return response def show_bgp_ipv4_summary(dut, **kwargs): """ @@ -686,6 +1008,17 @@ def show_bgp_ipv4_summary(dut, **kwargs): command = "show bgp ipv4 summary" elif cli_type == "klish": command = 'show bgp ipv4 unicast summary' + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return parse_bgp_summary_output(output) + else: + return [] + else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return [] @@ -705,6 +1038,17 @@ def show_bgp_ipv6_summary(dut, **kwargs): command = "show bgp ipv6 summary" elif cli_type == "klish": command = 'show bgp ipv6 unicast summary' + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return parse_bgp_summary_output(output) + else: + return [] + else: st.log("UNSUPPORTED CLI TYPE -- 
{}".format(cli_type)) return [] @@ -780,13 +1124,33 @@ def show_bgp_ipv4_neighbor_vtysh(dut, neighbor_ip=None,vrf='default', **kwargs): command = "show ip bgp vrf {} neighbors".format(vrf) if neighbor_ip: command += " {}".format(neighbor_ip) - if cli_type == 'klish': + elif cli_type == 'klish': if vrf == 'default': command = "show bgp ipv4 unicast neighbors" else: command = "show bgp ipv4 unicast vrf {} neighbors".format(vrf) if neighbor_ip: command += " {}".format(neighbor_ip) + elif cli_type in ["rest-patch", "rest-put"]: + rest_url = st.get_datastore(dut, 'rest_urls') + #Getting router id + url = rest_url["bgp_routerid_state"].format(vrf) + output_router_id = get_rest(dut, rest_url=url) + if output_router_id and rest_status(output_router_id["status"]): + output_router_id = output_router_id["output"] + else: + output_router_id ={} + router_id = "" + if output_router_id: + router_id = output_router_id["openconfig-network-instance:router-id"] + url = rest_url['bgp_neighbor_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return _parse_ip_bgp_data(output, "ipv4", router_id) + else: + return [] + return st.show(dut, command, type=cli_type) @@ -805,15 +1169,162 @@ def show_bgp_ipv6_neighbor_vtysh(dut, neighbor_ip=None,vrf='default', **kwargs): command = "show bgp vrf {} ipv6 neighbors".format(vrf) if neighbor_ip: command += " {}".format(neighbor_ip) - if cli_type == 'klish': + elif cli_type == 'klish': if vrf == 'default': command = "show bgp ipv6 unicast neighbors" else: command = "show bgp ipv6 unicast vrf {} neighbors".format(vrf) if neighbor_ip: command += " {}".format(neighbor_ip) + elif cli_type in ["rest-patch", "rest-put"]: + rest_url = st.get_datastore(dut, 'rest_urls') + # Getting router id + url = rest_url["bgp_routerid_state"].format(vrf) + output_router_id = get_rest(dut, rest_url=url) + if output_router_id and rest_status(output_router_id["status"]): + 
output_router_id = output_router_id["output"] + else: + output_router_id = {} + router_id = "" + if output_router_id: + router_id = output_router_id["openconfig-network-instance:router-id"] + url = rest_url['bgp_neighbor_config'].format(vrf) + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + return _parse_ip_bgp_data(output, "ipv6", router_id) + else: + return [] return st.show(dut, command, type=cli_type) +def _parse_ip_bgp_data(output, family, router_id): + """ + Common function to parse BGP neighbors data + :param output: + :param family: + :param router_id: + :return: + """ + result = list() + if output: + if not router_id: + return result + neighbors = output["openconfig-network-instance:neighbors"] + if "neighbor" in neighbors: + neighbor_data = neighbors["neighbor"] + for neighbor in neighbor_data: + afi_safi_data = neighbor["afi-safis"]["afi-safi"] + for afi_safi in afi_safi_data: + if family in afi_safi["afi-safi-name"].lower(): + show_output = dict() + show_output["localrouterid"] = router_id + show_output["updatercvd"] = show_output["updatesent"] = show_output["openrcvd"] = show_output[ + "opensent"] = \ + show_output["routerefreshrcvd"] = show_output["routerefreshsent"] = show_output[ + "capabilityrcvd"] = \ + show_output["capabilitysent"] = \ + show_output["keepalivercvd"] = show_output["keepalivesent"] = show_output["inqdepth"] = \ + show_output[ + "outqdepth"] = \ + show_output["holdtime"] = show_output["keepalive"] = show_output["bgplastreset"] = 0 + show_output["peergroup"] = show_output["neighborip"] = show_output["remrouterid"] = show_output[ + "lastread"] = \ + show_output["lastwrite"] = show_output["bfdstatus"] = "" + show_output["state"] = "IDLE" + keys = ["bgpversion", "bfdrxintr", "bfdtxintr", "subgrp", "updtgrp", "l2vpnevpnncap", + "ipv4ucastncap", + "bfdmultiplier", "acceptprefix", "grcapability", "senttotal", "rcvdtotal", "bfdtype", + "pktql", 
+ "endofribsend", "endofribrcv"] + for key in keys: + show_output[key] = "" + show_output["neighborip"] = neighbor[ + "neighbor-address"] if "neighbor-address" in neighbor else "" + if "config" in neighbor: + show_output["peergroup"] = neighbor["config"]["peer-group"] if "peer-group" in neighbor[ + "config"] else "" + show_output["localasn"] = neighbor["config"]["local-as"] if "local-as" in neighbor[ + "config"] else "" + show_output["remoteasn"] = neighbor["config"]["peer-as"] if "peer-as" in neighbor[ + "config"] else "" + show_output["bgpdownreason"] = neighbor["config"][ + "openconfig-bgp-ext:shutdown-message"] if "openconfig-bgp-ext:shutdown-message" in \ + neighbor["config"] else "" + + if "state" in neighbor: + if "messages" in neighbor["state"]: + messages = neighbor["state"]["messages"] + if "received" in messages: + show_output["updatercvd"] = messages["received"]["UPDATE"] if "UPDATE" in messages[ + "received"] else 0 + show_output["openrcvd"] = messages["received"][ + "openconfig-bgp-ext:open"] if "openconfig-bgp-ext:open" in messages[ + "received"] else 0 + show_output["routerefreshrcvd"] = messages["received"][ + "openconfig-bgp-ext:route-refresh"] if "openconfig-bgp-ext:route-refresh" in \ + messages["received"] else 0 + show_output["capabilityrcvd"] = messages["received"][ + "openconfig-bgp-ext:capablity"] if "openconfig-bgp-ext:capablity" in messages[ + "received"] else 0 + show_output["keepalivercvd"] = messages["received"][ + "openconfig-bgp-ext:keepalive"] if "openconfig-bgp-ext:keepalive" in messages[ + "received"] else 0 + show_output["notificationrcvd"] = messages["received"][ + "NOTIFICATION"] if "NOTIFICATION" in messages["received"] else 0 + if "sent" in messages: + show_output["updatesent"] = messages["sent"]["UPDATE"] if "UPDATE" in messages[ + "sent"] else 0 + show_output["opensent"] = messages["sent"][ + "openconfig-bgp-ext:open"] if "openconfig-bgp-ext:open" in messages[ + "sent"] else 0 + show_output["routerefreshsent"] = 
messages["sent"][ + "openconfig-bgp-ext:route-refresh"] if "openconfig-bgp-ext:route-refresh" in \ + messages[ + "sent"] else 0 + show_output["capabilitysent"] = messages["sent"][ + "openconfig-bgp-ext:capablity"] if "openconfig-bgp-ext:capablity" in messages[ + "sent"] else 0 + show_output["keepalivesent"] = messages["sent"][ + "openconfig-bgp-ext:keepalive"] if "openconfig-bgp-ext:keepalive" in messages[ + "sent"] else 0 + show_output["notificationsent"] = messages["sent"][ + "NOTIFICATION"] if "NOTIFICATION" in \ + messages[ + "sent"] else 0 + show_output["state"] = neighbor["state"]["session-state"] if "session-state" in neighbor[ + "state"] else "IDLE" + show_output["remrouterid"] = neighbor["state"][ + "openconfig-bgp-ext:remote-router-id"] if "openconfig-bgp-ext:remote-router-id" in \ + neighbor[ + "state"] else "" + show_output["lastread"] = neighbor["state"][ + "openconfig-bgp-ext:last-read"] if "openconfig-bgp-ext:last-read" in neighbor[ + "state"] else "" + show_output["lastwrite"] = neighbor["state"][ + "openconfig-bgp-ext:last-write"] if "openconfig-bgp-ext:last-write" in neighbor[ + "state"] else "" + show_output["bgplastreset"] = neighbor["state"][ + "openconfig-bgp-ext:last-reset-time"] if "openconfig-bgp-ext:last-reset-time" in \ + neighbor[ + "state"] else "" + if "queues" in neighbor["state"]: + show_output["inqdepth"] = neighbor["state"]["queues"]["input"] + show_output["outqdepth"] = neighbor["state"]["queues"]["output"] + if "timers" in neighbor: + if "config" in neighbor["timers"]: + show_output["holdtime"] = neighbor["timers"]["config"]["hold-time"] if "hold-time" in \ + neighbor[ + "timers"][ + "config"] else 0 + show_output["keepalive"] = neighbor["timers"]["config"][ + "keepalive-interval"] if "keepalive-interval" in neighbor["timers"]["config"] else 0 + if "openconfig-bfd:enable-bfd" in neighbor: + if "config" in neighbor["openconfig-bfd:enable-bfd"]: + bfd_status = neighbor["openconfig-bfd:enable-bfd"]["config"]["enabled"] + 
show_output["bfdstatus"] = "" if not bfd_status else bfd_status + result.append(show_output) + return result + def clear_ip_bgp(dut, **kwargs): """ @@ -822,6 +1333,7 @@ def clear_ip_bgp(dut, **kwargs): :return: """ cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type if cli_type in ["click", "vtysh"]: # command = "sonic-clear ip bgp" command = "clear ip bgp *" @@ -829,6 +1341,14 @@ def clear_ip_bgp(dut, **kwargs): elif cli_type == 'klish': command = 'clear bgp ipv4 unicast *' st.config(dut, command, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + if not delete_rest(dut, rest_url=url): + st.error("Clearing BGP config failed") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -843,6 +1363,7 @@ def clear_bgp_vtysh(dut, **kwargs): :return: """ cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type address_family = kwargs.get('address_family', 'all') af_list = ['ipv4','ipv6'] if address_family == 'ipv4': @@ -870,6 +1391,7 @@ def clear_bgp_vtysh(dut, **kwargs): def clear_ip_bgp_vtysh(dut, value="*", **kwargs): cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type if cli_type == 'vtysh': command = "clear ip bgp ipv4 {}".format(value) st.config(dut, command, type='vtysh', conf=False) @@ -883,6 +1405,7 @@ def clear_ip_bgp_vtysh(dut, value="*", **kwargs): def clear_ipv6_bgp_vtysh(dut, value="*", **kwargs): cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type if cli_type == 'vtysh': command = "clear ip bgp ipv6 {}".format(value) elif cli_type == 'klish': @@ -891,6 +1414,7 @@ def clear_ipv6_bgp_vtysh(dut, value="*", **kwargs): def 
clear_ip_bgp_vrf_vtysh(dut,vrf,family='ipv4',value="*", **kwargs): cli_type = get_cfg_cli_type(dut, **kwargs) + cli_type = "klish" if cli_type in ["rest-put", "rest-patch"] else cli_type if cli_type == 'vtysh': command = "clear bgp vrf {} {} {}".format(vrf,family,value) st.config(dut, command, type='vtysh', conf=False) @@ -949,6 +1473,49 @@ def create_bgp_aggregate_address(dut, **kwargs): commands.append("exit") commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + if kwargs["family"] == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + if kwargs.get("config") == "add": + url = rest_urls['bgp_config'].format(vrf) + bgp_data = dict() + bgp_data["openconfig-network-instance:bgp"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(kwargs["local_asn"]) + bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + afi_safi_data = list() + afi_safi = dict() + afi_safi["afi-safi-name"] = afi_safi_name + afi_safi["config"] = dict() + afi_safi["config"]["afi-safi-name"] = afi_safi_name + afi_safi["openconfig-bgp-ext:aggregate-address-config"] = dict() + afi_safi["openconfig-bgp-ext:aggregate-address-config"]["aggregate-address"] = list() + aggregate_data = dict() + aggregate_data["prefix"] = kwargs["address_range"] + aggregate_data["config"] = dict() + aggregate_data["config"]["prefix"] = kwargs["address_range"] + if kwargs.get("summary"): + aggregate_data["config"]["summary-only"] = True + if kwargs.get("as_set"): + aggregate_data["config"]["as-set"] = True + 
afi_safi["openconfig-bgp-ext:aggregate-address-config"]["aggregate-address"].append(aggregate_data) + afi_safi_data.append(afi_safi) + bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = afi_safi_data + if not config_rest(dut, rest_url=url,http_method=cli_type, json_data=bgp_data): + st.error("Error in configuring aggregate address") + return False + return True + else: + url = rest_urls["bgp_aggregate_address_config"].format(name=vrf, afi_safi_name=afi_safi_name, prefix=kwargs["address_range"]) + if not delete_rest(dut,rest_url= url): + st.error("Error in deleting aggregate address") + return False + return True else: st.error("Unsupported CLI TYPE -- {}".format(cli_type)) return False @@ -962,9 +1529,29 @@ def create_bgp_update_delay(dut, local_asn, time=0, cli_type="", skip_error_chec :return: """ cli_type = get_cfg_cli_type(dut, cli_type=cli_type) - config_router_bgp_mode(dut, local_asn, cli_type=cli_type) - command = "update-delay {}".format(time) - st.config(dut, command,type=cli_type, skip_error_check=skip_error_check) + if cli_type in ["click", "vtysh", "klish"]: + config_router_bgp_mode(dut, local_asn, cli_type=cli_type) + command = "update-delay {}".format(time) + st.config(dut, command,type=cli_type, skip_error_check=skip_error_check) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + bgp_data = dict() + bgp_data["openconfig-network-instance:bgp"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:update-delay"] = dict() + bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:update-delay"]["config"] = dict() + 
bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:update-delay"]["config"]["max-delay"] = int(time) + if not config_rest(dut, rest_url=url,http_method=cli_type, json_data=bgp_data): + st.error("Error in configuring update delay") + return False + return True + else: + st.error("UNSUPPORTED CLI TYPE") + return False def create_bgp_always_compare_med(dut, local_asn): @@ -996,6 +1583,44 @@ def create_bgp_best_path(dut, local_asn, user_command, cli_type=""): command = list() command.append("bestpath {}".format(user_command)) command.append("exit") + elif cli_type in ["rest-put", "rest-patch"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + route_selection_data = dict() + route_selection_data["openconfig-network-instance:bgp"] = dict() + if user_command == "compare-routerid": + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"]["external-compare-router-id"] = True + elif user_command == "as-path confed": + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"][ + "config"]["openconfig-bgp-ext:compare-confed-as-path"] = True + elif user_command == "as-path ignore": + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"][ + "config"]["ignore-as-path-length"] = True + elif user_command == "as-path multipath-relax": + route_selection_data["openconfig-network-instance:bgp"]["global"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"] = dict() + 
route_selection_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]["config"]["allow-multiple-as"] = True + elif user_command == "med confed": + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"][ + "config"]["openconfig-bgp-ext:med-confed"] = True + elif user_command == "med confed missing-as-worst": + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"]["config"] = dict() + route_selection_data["openconfig-network-instance:bgp"]["route-selection-options"][ + "config"]["openconfig-bgp-ext:med-missing-as-worst"] = True + else: + st.error("Unsupporte user command") + return False + if not config_rest(dut,http_method=cli_type, rest_url=url, json_data=route_selection_data): + st.error("Error in configuring best path") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1031,6 +1656,23 @@ def create_bgp_client_to_client_reflection(dut, local_asn, config='yes', cli_typ commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) return True + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_global_config'].format(vrf) + + if cfgmode != 'no': + client_client_reflection = True + else: + client_client_reflection = False + global_data = dict() + global_data["openconfig-network-instance:config"] = dict() + global_data["openconfig-network-instance:config"]["as"] = int(local_asn) + global_data["openconfig-network-instance:config"]["openconfig-bgp-ext:clnt-to-clnt-reflection"] = 
client_client_reflection + if not config_rest(dut,http_method=cli_type, rest_url=url, json_data=global_data): + st.error("Error in configuring client to client reflection") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1038,7 +1680,6 @@ def create_bgp_client_to_client_reflection(dut, local_asn, config='yes', cli_typ def create_bgp_route_reflector_client(dut, local_asn, addr_family, nbr_ip, config='yes', cli_type="", skip_error_check=True): """ - :param dut: :param local_asn: :param addr_family: @@ -1051,31 +1692,13 @@ def create_bgp_route_reflector_client(dut, local_asn, addr_family, nbr_ip, confi command = "router bgp {}".format(local_asn) command += "\n address-family {} {}".format(addr_family, "unicast") command += "\n {} neighbor {} route-reflector-client".format(cfgmode, nbr_ip) - st.config(dut, command, type=cli_type) return True elif cli_type == "klish": addr_family_type = "unicast" neigh_name = nbr_ip - # if re.findall(r'Ethernet|Vlan|PortChannel', nbr_ip): - # neigh_name = get_interface_number_from_name(nbr_ip) commands = list() commands.append("router bgp {}".format(local_asn)) - - # if re.findall(r'Ethernet|Vlan|PortChannel', nbr_ip): - # neigh_name = get_interface_number_from_name(nbr_ip) - # commands.append("{} neighbor interface {} {}".format(cfgmode, neigh_name["type"], neigh_name["number"])) - # elif is_valid_ip_address(neigh_name, addr_family): - # commands.append("{} neighbor {}".format(cfgmode, nbr_ip)) - # else: - # commands.append("{} peer-group {}".format(cfgmode, nbr_ip)) - # if config == "yes": - # if addr_family == 'l2vpn' : addr_family_type = "evpn" - # commands.append("address-family {} {}".format(addr_family, addr_family_type)) - # commands.append("{} route-reflector-client".format(cfgmode)) - # commands.append("exit") - # else: - # commands.append("exit") if re.findall(r'Ethernet|Vlan|PortChannel|Eth', nbr_ip): neigh_name = get_interface_number_from_name(nbr_ip) 
commands.append("neighbor interface {} {}".format( neigh_name["type"], neigh_name["number"])) @@ -1091,9 +1714,63 @@ def create_bgp_route_reflector_client(dut, local_asn, addr_family, nbr_ip, confi commands.append("exit") commands.append("exit") commands.append("exit") - st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) return True + elif cli_type in ["rest-patch", "rest-put"]: + + rest_urls = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_urls['bgp_config'].format(vrf) + route_reflector_data = dict() + route_reflector_data["openconfig-network-instance:bgp"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["global"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + + if addr_family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + elif addr_family == "l2vpn": + afi_safi_name = "L2VPN_EVPN" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + + common_data = dict() + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + afi_safi_data = dict() + afi_safi_data["afi-safi-name"] = afi_safi_name + afi_safi_data["config"] = dict() + afi_safi_data["config"]["afi-safi-name"] = afi_safi_name + if cfgmode != 'no': + route_reflector_client = True + else: + route_reflector_client = False + afi_safi_data["config"]["openconfig-bgp-ext:route-reflector-client"] = route_reflector_client + + common_data["afi-safis"]["afi-safi"].append(afi_safi_data) + if re.findall(r'Ethernet|Vlan|PortChannel|Eth', nbr_ip) or addr_family == 'l2vpn' or is_valid_ip_address(nbr_ip, addr_family): + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + neigh_data = dict() + neigh_data["neighbor-address"] = nbr_ip + 
neigh_data["config"]=dict() + neigh_data["config"]["neighbor-address"]= nbr_ip + neigh_data.update(common_data) + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + else: + route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + peer_data = dict() + peer_data["peer-group-name"] = nbr_ip + peer_data["config"] = dict() + peer_data["config"]["peer-group-name"] = nbr_ip + peer_data.update(common_data) + route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + + if not config_rest(dut, rest_url=url, http_method=cli_type,json_data=route_reflector_data): + st.error("Error while configuring route reflector client") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1140,7 +1817,7 @@ def create_bgp_next_hop_self(dut, local_asn, addr_family, nbr_ip, force='no', co commands.append("{} neighbor {}".format(cfgmode, nbr_ip)) else: commands.append("{} peer-group {}".format(cfgmode, nbr_ip)) - #commands.append("address-family {} {}".format(addr_family, "unicast")) + if config == "yes": force_cmd = "force" if force == 'yes' else "" commands.append("address-family {} {}".format(addr_family, "unicast")) @@ -1150,6 +1827,73 @@ def create_bgp_next_hop_self(dut, local_asn, addr_family, nbr_ip, force='no', co commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) return True + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + + vrf = "default" + if not cfgmode: + url = rest_urls['bgp_config'].format(vrf) + route_reflector_data = dict() + route_reflector_data["openconfig-network-instance:bgp"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["global"] = dict() + 
route_reflector_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + if config == "yes": + if addr_family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + + common_data = dict() + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + afi_safi_data = dict() + afi_safi_data["afi-safi-name"] = afi_safi_name + afi_safi_data["config"] = dict() + afi_safi_data["config"]["afi-safi-name"] = afi_safi_name + afi_safi_data["openconfig-bgp-ext:next-hop-self"] = dict() + afi_safi_data["openconfig-bgp-ext:next-hop-self"]["config"] = dict() + force_cmd = True if force == 'yes' else False + afi_safi_data["openconfig-bgp-ext:next-hop-self"]["config"]["force"] = force_cmd + afi_safi_data["openconfig-bgp-ext:next-hop-self"]["config"]["enabled"] = True if force != "yes" else False + common_data["afi-safis"]["afi-safi"].append(afi_safi_data) + if is_valid_ip_address(nbr_ip, addr_family): + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + neigh_data = dict() + neigh_data["neighbor-address"] = nbr_ip + neigh_data["config"]=dict() + neigh_data["config"]["neighbor-address"] = nbr_ip + neigh_data.update(common_data) + route_reflector_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + else: + route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + peer_data = dict() + peer_data["peer-group-name"] = nbr_ip + peer_data["config"] = dict() + peer_data["config"]["peer-group-name"] = nbr_ip + peer_data.update(common_data) + 
route_reflector_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + + + if not config_rest(dut, rest_url=url, http_method=cli_type,json_data=route_reflector_data): + st.error("Error while configuring next hop self") + return False + return True + else: + result = True + url = rest_urls['bgp_del_neighbor_config'].format(vrf,nbr_ip) + if not delete_rest(dut, rest_url=url): + st.error("Deleting neighbor with address failed") + result = False + url = rest_urls['bgp_peer_group_name_config'].format(vrf,nbr_ip) + if not delete_rest(dut, rest_url=url): + st.error("Deleting peergroup with name failed") + result = False + if not result: + return False + return True else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1235,6 +1979,46 @@ def config_bgp_default(dut, local_asn, user_command, config='yes', cli_type="", commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) return True + elif cli_type in ["rest-patch", "rest-put"]: + + data = dict() + data["openconfig-network-instance:bgp"]=dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"].update({"as": int(local_asn)}) + data["openconfig-network-instance:bgp"]["global"]["afi-safis"]=dict() + data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"]=list() + data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:global-defaults"]=dict() + data_sub=dict() + data_sub["config"]=dict() + + if user_command == "ipv4-unicast": + data_sub["config"]["ipv4-unicast"] = True if cfgmode == 'yes' else False + elif "local-preference" in user_command: + if cfgmode == "yes": + data_sub["config"]["local-preference"] = int(user_command.replace('local-preference', '').strip()) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_local_pref'] + if not 
delete_rest(dut, rest_url=url.format("default")): + st.error("failed to delete local-pref") + + elif user_command == "show-hostname": + data_sub["config"]["show-hostname"] = True if cfgmode == 'yes' else False + elif user_command == "shutdown": + data_sub["config"]["shutdown"] = True if cfgmode == 'yes' else False + elif "subgroup-pkt-queue-max" in user_command: + if cfgmode == "yes": + data_sub["config"]["subgroup-pkt-queue-max"] = int(user_command.replace("subgroup-pkt-queue-max", "").strip()) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_subgrp'] + if not delete_rest(dut, rest_url=url.format("default")): + st.error("failed to delete subgroup") + data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:global-defaults"].update(data_sub) + url = st.get_datastore(dut,"rest_urls")['bgp_config'].format("default") + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("bgp default router config failed") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1250,11 +2034,13 @@ def config_bgp_always_compare_med(dut, local_asn, config='yes', cli_type=""): """ cli_type = get_cfg_cli_type(dut, cli_type=cli_type) config_router_bgp_mode(dut, local_asn, cli_type=cli_type) + if cli_type == "vtysh": if config == 'yes' : command = "bgp always-compare-med" else : command = "no bgp always-compare-med" + st.config(dut, command, type=cli_type) elif cli_type == 'klish': command = list() if config == 'yes' : @@ -1262,10 +2048,25 @@ def config_bgp_always_compare_med(dut, local_asn, config='yes', cli_type=""): else : command.append("no always-compare-med") command.append('exit') + st.config(dut, command, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + if config == 'yes': + url = st.get_datastore(dut, "rest_urls")["bgp_config"] + data = {"openconfig-network-instance:bgp": {"global": {"config": {"as": int(local_asn)}, + "route-selection-options": { + "config": 
{"always-compare-med": True}}}}} + if not config_rest(dut,rest_url=url.format("default"),http_method=cli_type,json_data=data): + st.error("failed to configure always compare med") + return False + else: + url =st.get_datastore(dut,"rest_urls")["bgp_config_med"].format("default") + if not delete_rest(dut, rest_url=url): + st.error("no form of compare med failed") + return False + return True else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False - st.config(dut, command, type=cli_type) return True @@ -1278,6 +2079,7 @@ def config_bgp_deterministic_med(dut, local_asn, config='yes',cli_type=''): """ cli_type = get_cfg_cli_type(dut, cli_type=cli_type) config_router_bgp_mode(dut, local_asn, cli_type=cli_type) + command="" if cli_type == "vtysh": if config == 'yes' : command = "bgp deterministic-med" @@ -1290,6 +2092,19 @@ def config_bgp_deterministic_med(dut, local_asn, config='yes',cli_type=''): else : command.append("no deterministic-med") command.append('exit') + elif cli_type in ["rest-patch", "rest-put"]: + if config == 'yes': + url = st.get_datastore(dut, "rest_urls")["bgp_config"].format("default") + data = {"openconfig-network-instance:bgp": {"global": {"config": {"as": int(local_asn), + "openconfig-bgp-ext:deterministic-med": True}}}} + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=data): + st.error("failed to configure deterministic-med") + return False + else: + url = st.get_datastore(dut, "rest_urls")["bgp_del_deter_med"].format("default") + if not delete_rest(dut, rest_url=url): + st.error("failed to delete deterministic-med ") + return False else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1323,6 +2138,7 @@ def config_bgp_graceful_restart(dut, **kwargs): vrf = kwargs.get('vrf', "default") skip_error_check = kwargs.get("skip_error_check", True) cli_type = get_cfg_cli_type(dut, **kwargs) + command ="" if "local_asn" not in kwargs and "config" not in kwargs : st.error("Mandatory params 
not provided") return False @@ -1340,6 +2156,33 @@ def config_bgp_graceful_restart(dut, **kwargs): command += "{} {} graceful-restart preserve-fw-state\n".format(mode, bgp_mode) if cli_type == 'klish': command += "exit\n" + if cli_type in ["rest-patch","rest-put"]: + rest_urls=st.get_datastore(dut,"rest_urls") + data=dict() + data["openconfig-network-instance:bgp"]=dict() + data["openconfig-network-instance:bgp"]["global"]=dict() + data["openconfig-network-instance:bgp"]["global"]["graceful-restart"]=dict() + if mode !='no': + data["openconfig-network-instance:bgp"]["global"]["graceful-restart"].update({"config": {"enabled": True}}) + if preserve_state != None: + data["openconfig-network-instance:bgp"]["global"]["graceful-restart"].update({"config": {"openconfig-bgp-ext:preserve-fw-state": True}}) + else: + url=rest_urls['bgp_del_graceful'].format(vrf) + if not delete_rest(dut,rest_url=url): + st.error("failed unconfig graceful restart") + url=rest_urls['bgp_config'].format(vrf) + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("unable enable graceful restart") + return False + return True + else: + url=rest_urls['bgp_del_grace'].format(vrf) + if not delete_rest(dut,rest_url=url): + st.error("unable to disable graceful restart") + return False + return True + + if not(mode == 'no ' and cli_type == 'vtysh'): if "user_command" in kwargs: command += " {}".format(kwargs["user_command"]) @@ -1358,6 +2201,22 @@ def config_bgp_graceful_shutdown(dut, local_asn, config="add", cli_type="vtysh", bgp_mode = "bgp" if cli_type == "vtysh" else "" command = "{} {} graceful-shutdown".format(mode, bgp_mode) st.config(dut, command, type=cli_type, skip_error_check=skip_error_check) + if cli_type in ["rest-patch","rest-put"]: + rest_urls = st.get_datastore(dut, "rest_urls") + data = {"openconfig-bgp-ext:graceful-shutdown": True} + if mode !='no': + url=rest_urls['bgp_config_graceful_shut'].format("default") + if not 
config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("failed to config graceful shutdown") + return False + return True + else: + url=rest_urls['bgp_del_graceful_shut'].format("default") + if not delete_rest(dut,rest_url=url): + st.error("failed to delete graceful shutdown") + return False + return True + def config_bgp_listen(dut, local_asn, neighbor_address, subnet, peer_grp_name, limit, config='yes', cli_type="", skip_error_check=True): """ @@ -1392,6 +2251,57 @@ def config_bgp_listen(dut, local_asn, neighbor_address, subnet, peer_grp_name, l command = ["{} listen limit {}".format(mode, limit)] command.append('exit') st.config(dut, command, type=cli_type, skip_error_check=skip_error_check) + elif cli_type in ["rest-patch","rest-put"]: + config_data = dict() + config_data["openconfig-network-instance:bgp"] = dict() + if neighbor_address: + if mode != 'no': + config_data["openconfig-network-instance:bgp"]["global"] = dict() + config_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + config_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + config_data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + config_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + peer_data = dict() + peer_data.update({"peer-group-name": peer_grp_name}) + peer_data["config"] = dict() + peer_data["config"].update({"peer-group-name": peer_grp_name}) + config_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + config_data["openconfig-network-instance:bgp"]["global"]["dynamic-neighbor-prefixes"] = dict() + config_data["openconfig-network-instance:bgp"]["global"]["dynamic-neighbor-prefixes"][ + "dynamic-neighbor-prefix"] = list() + prefix_data = dict() + prefix_data.update({"prefix": "{}/{}".format(neighbor_address, subnet)}) + prefix_data["config"] = dict() + prefix_data["config"].update( + {"prefix": "{}/{}".format(neighbor_address, 
subnet), "peer-group": peer_grp_name}) + config_data["openconfig-network-instance:bgp"]["global"]["dynamic-neighbor-prefixes"][ + "dynamic-neighbor-prefix"].append(prefix_data) + url=st.get_datastore(dut,"rest_urls")['bgp_config'].format("default") + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=config_data): + st.error("unable to config bgp listen") + + else: + + url = st.get_datastore(dut, "rest_urls")['bgp_del_dyn'].format("default") + if not delete_rest(dut, rest_url=url): + st.error("unable to delete BGP listen config") + + if limit: + if mode != 'no': + config_data = {"openconfig-network-instance:max-dynamic-neighbors": limit} + url = st.get_datastore(dut,"rest_urls")['bgp_config_dyn'].format("default") + response = config_rest(dut,rest_url=url,http_method=cli_type,json_data=config_data) + + else: + url=st.get_datastore(dut,"rest_urls")['bgp_del_dyn'].format("default") + response = delete_rest(dut,rest_url=url) + + if not response: + st.log(response) + return False + return True + + else: st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1544,9 +2454,47 @@ def config_bgp_max_med(dut, local_asn, config='yes',**kwargs): command += "no max-med administrative {}\n".format(kwargs['administrative_med']) command += 'exit\n' st.config(dut, command.split("\n"), type=cli_type) + + elif cli_type in ["rest-patch", "rest-put"]: + config_router_bgp_mode(dut, local_asn, cli_type=cli_type) + data = dict() + if config == 'yes': + if 'on_start_time' and 'on_start_med' in kwargs: + data.update({"max-med-val": kwargs['on_start_time'], "time":kwargs['on_start_med']}) + elif 'on_start_time' in kwargs: + data.update({"max-med-val": kwargs['on_start_time']}) + if 'administrative_med' in kwargs: + data.update({"administrative": kwargs['administrative_med']}) + url = st.get_datastore(dut,"rest_urls")['bgp_config'].format("default") + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("failed to confgi max 
med") + return False + return True + + else: + if 'on_start_time' in kwargs and 'on_start_med' in kwargs: + url = st.get_datastore(dut,"rest_urls")['bgp_del_med'].format("default") + if not delete_rest(dut,rest_url=url): + st.error("failed to delete med on start time") + return False + elif 'on_start_time' in kwargs: + url = st.get_datastore(dut, "rest_urls")['bgp_del_med'].format("default") + if not delete_rest(dut, rest_url=url): + st.error("failed to delete med on start time") + return False + if 'administrative_med' in kwargs: + url = st.get_datastore(dut,"rest_urls")['bgp_del_med_ad'].format("default") + if not delete_rest(dut,rest_url=url): + st.error("failed to delete med on start time") + return False + return True + else: + st.log("Unsupported CLI TYPE - {}".format(cli_type)) + return False return True + def config_route_map_delay_timer(dut, local_asn, timer): """ @@ -1692,7 +2640,14 @@ def create_bgp_peergroup(dut, local_asn, peer_grp_name, remote_asn, keep_alive=6 st.log(response) return False if remote_asn != None: - ocdata = {"openconfig-network-instance:peer-groups":{"peer-group":[{"peer-group-name":peer_grp_name,"config":{"peer-as":int(remote_asn)}}]}} + if str(remote_asn).isdigit(): + ocdata = {"openconfig-network-instance:peer-groups":{"peer-group":[{"peer-group-name":peer_grp_name,"config":{"peer-as":int(remote_asn)}}]}} + else: + if remote_asn == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + ocdata = {"openconfig-network-instance:peer-groups":{"peer-group": [{"peer-group-name": peer_grp_name, "config": {"peer-type": peer_type}}]}} response = config_rest(dut, http_method=http_method, rest_url=rest_url_peergroup, json_data=ocdata) if not response: st.log('Remote-as config in the Peergroup failed') @@ -1849,6 +2804,50 @@ def create_bgp_neighbor_use_peergroup(dut, local_asn, peer_grp_name, neighbor_ip commands.append("exit") commands.append("exit") st.config(dut, commands, type=cli_type, skip_error_check=skip_error_check) + 
elif cli_type in ["rest-patch","rest_put"]: + if family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + elif family == "l2vpn": + afi_safi_name = "L2VPN_EVPN" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + + data = dict() + data["openconfig-network-instance:bgp"] = dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + peer_data = dict() + peer_data.update({"peer-group-name": peer_grp_name}) + peer_data["config"] = dict() + peer_data["config"].update({"peer-group-name": peer_grp_name}) + data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + peer_data["afi-safis"] = dict() + peer_data["afi-safis"]["afi-safi"] = list() + peer_sub = dict() + peer_sub.update({"afi-safi-name": afi_safi_name}) + peer_sub["config"] = dict() + peer_sub["config"].update({"afi-safi-name": afi_safi_name, "enabled": True}) + peer_data["afi-safis"]["afi-safi"].append(peer_sub) + data["openconfig-network-instance:bgp"]["neighbors"] = dict() + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + peer_sub.update({"afi-safi-name": afi_safi_name}) + peer_sub["config"].update({"afi-safi-name": afi_safi_name, "enabled": True}) + neigh_data = dict() + neigh_data.update({"neighbor-address": neighbor_ip}) + neigh_data["config"] = dict() + neigh_data["config"].update({"neighbor-address": neighbor_ip, "peer-group": peer_grp_name}) + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + url = st.get_datastore(dut,"rest_urls")['bgp_config'].format(vrf) + if not 
config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("failed to config peer group") + return False + return True + else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -1893,6 +2892,65 @@ def create_bgp_neighbor_interface(dut, local_asn, interface_name, remote_asn,fam commands.append("exit") else: commands.append("exit") + elif cli_type in ["rest-patch","rest-put"]: + if family == "ipv4": + afi_safi_data = "openconfig-bgp-types:IPV4_UNICAST" + elif family == "l2vpn": + afi_safi_data = "L2VPN_EVPN" + else: + afi_safi_data = "openconfig-bgp-types:IPV6_UNICAST" + + data = dict() + data["openconfig-network-instance:bgp"] = dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + data["openconfig-network-instance:bgp"]["neighbors"] = dict() + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + neigh_data = dict() + neigh_data.update({"neighbor-address": interface_name}) + neigh_data["config"] = dict() + if str(remote_asn).isdigit(): + neigh_data["config"].update({"neighbor-address": interface_name, "peer-as": int(remote_asn)}) + else: + if remote_asn == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + neigh_data["config"].update({"neighbor-address": interface_name, "peer-type": peer_type}) + + if mode == "": + neigh_data_sub = dict() + neigh_data_sub["afi-safis"] = dict() + neigh_data_sub["afi-safis"]["afi-safi"] = list() + neigh_data_sub_data = dict() + neigh_data_sub_data["config"] = dict() + neigh_data_sub_data.update({"afi-safi-name": afi_safi_data}) + neigh_data_sub_data["config"].update({"afi-safi-name": afi_safi_data, "enabled": True}) + neigh_data_sub["afi-safis"]["afi-safi"].append(neigh_data_sub_data) + neigh_data.update(neigh_data_sub) + 
data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + url = st.get_datastore(dut,"rest_urls")['bgp_config'].format("default") + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=data): + st.error("failed to config bgp neighbor") + return False + return True + else: + neigh_data_sub = dict() + neigh_data_sub["afi-safis"] = dict() + neigh_data_sub["afi-safis"]["afi-safi"] = list() + neigh_data_sub_data = dict() + neigh_data_sub_data["config"] = dict() + neigh_data_sub_data.update({"afi-safi-name": afi_safi_data}) + neigh_data_sub_data["config"].update({"afi-safi-name": afi_safi_data, "enabled": False}) + neigh_data_sub["afi-safis"]["afi-safi"].append(neigh_data_sub_data) + neigh_data.update(neigh_data_sub) + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + url = st.get_datastore(dut, "rest_urls")['bgp_neighbor_config'].format("default") + if not config_rest(dut, rest_url=url,http_method=cli_type,json_data=data): + st.error("failed to un-config bgp neighbor activate") + return False + return True else: st.log("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) return False @@ -2011,25 +3069,196 @@ def config_bgp_multi_neigh_use_peergroup(dut, **kwargs): cmd += 'exit \n' cmd += 'exit \n' st.config(dut, cmd, type='klish') - else: - st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) - return False + elif cli_type in ["rest-patch","rest-put"]: + family = kwargs.get('family', None) + local_asn = kwargs.get('local_asn', None) + + peergroup = kwargs.get('peer_grp_name', '') + #remote_as = kwargs.get('remote_asn', None) + keepalive = kwargs.get('keep_alive', "60") + holdtime = kwargs.get('hold', "180") + password = kwargs.get('password', None) + redistribute = kwargs.get('redistribute', None) + vrf_name = kwargs.get('vrf', "default") + routeMap = kwargs.get('routemap', None) + remote_as = kwargs.get('remote_asn', None) + config = kwargs.get('config', "yes") + config_cmd = "" if 
config.lower() == 'yes' else "no" + if family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + elif family == "l2vpn": + afi_safi_name = "L2VPN_EVPN" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + data = dict() + + data["openconfig-network-instance:bgp"] = dict() + data["openconfig-network-instance:bgp"]["global"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + + data["openconfig-network-instance:bgp"]["neighbors"] = dict() + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + + peer_data = dict() + peer_data.update({"peer-group-name": peergroup}) + peer_data["config"] = dict() + if str(remote_as).isdigit(): + peer_data["config"].update({"peer-group-name": peergroup, "peer-as": int(remote_as)}) + else: + if remote_as == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + peer_data["config"].update({"peer-group-name": peergroup, "peer-type": peer_type}) - return True + peer_data["afi-safis"] = dict() + peer_data["afi-safis"]["afi-safi"] = list() + peer_sub = dict() + if 'keep_alive' in kwargs and 'hold' in kwargs: + peer_data["timers"]=dict() + peer_data["timers"]["config"] = dict() + peer_data["timers"]["config"].update({"hold-time": str(holdtime), "keepalive-interval": str(keepalive)}) -def verify_bgp_summary(dut, family='ipv4', shell="sonic", **kwargs): - """ + if 'password' in kwargs: + peer_data["openconfig-bgp-ext:auth-password"] = dict() + peer_data["openconfig-bgp-ext:auth-password"]["config"] = dict() + peer_data["openconfig-bgp-ext:auth-password"]["config"].update({"password": password}) + if 'activate' in kwargs: + peer_sub.update( + {"afi-safi-name": afi_safi_name, "config": {"afi-safi-name": afi_safi_name, 
"enabled": True}}) - :param dut: - :param family: - :param shell: - :param kwargs: - :return: - """ - if shell not in ["vtysh", "klish", "rest-patch", "rest-put"]: - if 'vrf' in kwargs and shell=='sonic': - vrf = kwargs.pop('vrf') + open_data = dict() + open_data["openconfig-network-instance:table-connections"] = dict() + open_data["openconfig-network-instance:table-connections"]["table-connection"] = list() + if 'redistribute' in kwargs: + sub_data = dict() + redis_li = list(kwargs['redistribute']) if isinstance(kwargs['redistribute'], list) else [ + kwargs['redistribute']] + for each in redis_li: + if each == 'connected': + + sub_data["config"] = dict() + + redist_type = 'DIRECTLY_CONNECTED' + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_connected'] + url = url.format(vrf_name, redist_type) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute connected") + + + elif each == 'static': + + sub_data["config"] = dict() + + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), + "src-protocol": each.upper()}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": family.upper(), + "src-protocol": each.upper()}) + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_static'] + url = url.format(vrf_name, each.upper()) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute connected") + + + elif each == 'ospf': + + sub_data["config"] = dict() + + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), + "src-protocol": each.upper()}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": 
family.upper(), + "src-protocol": each.upper()}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_ospf'] + url = url.format(vrf_name, redistribute.upper()) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute connected") + + + + else: + st.error("unsupported redistribute value") + if sub_data: + open_data["openconfig-network-instance:table-connections"]["table-connection"].append(sub_data) + + + if 'routemap' in kwargs: + if 'routemap_dir' in kwargs: + if kwargs['routemap_dir'] == "out": + peer_sub.update( + {"apply-policy": { + "config": {"export-policy": [routeMap], "default-export-policy": "REJECT_ROUTE"}}}) + else: + peer_sub.update( + {"apply-policy": { + "config": {"import-policy": [routeMap], "default-import-policy": "REJECT_ROUTE"}}}) + + for each_neigh in neigh_ip_li: + neigh = { + "neighbor-address": each_neigh, + "config": { + "peer-group": peergroup, + "neighbor-address": each_neigh + + }, + "timers": { + "config": { + "connect-retry": "60"}} + + } + data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh) + + url = st.get_datastore(dut, "rest_urls")['bgp_config_route_map'].format(vrf_name) + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=open_data): + st.error("failed to config route-map") + + + peer_data["afi-safis"]["afi-safi"].append(peer_sub) + data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + + url = st.get_datastore(dut, "rest_urls")['bgp_config'].format(vrf_name) + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=data): + st.error("failed configure neighbor") + return False + return True + + else: + st.error("UNSUPPORTED CLI TYPE -- {}".format(cli_type)) + return False + + return True + + +def verify_bgp_summary(dut, family='ipv4', shell="sonic", **kwargs): + """ + + :param dut: + :param family: + :param shell: + :param kwargs: + :return: + """ + if shell not in ["vtysh", "klish", 
"rest-patch", "rest-put"]: + if 'vrf' in kwargs and shell=='sonic': + vrf = kwargs.pop('vrf') cmd = "show bgp vrf {} {} summary".format(vrf,family.lower()) else: cmd = "show bgp {} summary".format(family.lower()) @@ -2051,17 +3280,23 @@ def verify_bgp_summary(dut, family='ipv4', shell="sonic", **kwargs): return False st.debug(output) + + # Specifically checking neighbor state if 'neighbor' in kwargs and 'state' in kwargs: neigh_li = list(kwargs['neighbor']) if isinstance(kwargs['neighbor'], list) else [kwargs['neighbor']] for each_neigh in neigh_li: #For dynamic neighbor, removing *, as it is not displayed in klish - if shell == 'klish' or cli_type =='klish': - st.log('For dynamic neighbor, removing *, as it is not displayed in klish') + if shell in ['klish','rest-patch','rest-put'] or cli_type in ['klish','rest-patch','rest-put']: + st.log('For dynamic neighbor, removing *, as it is not displayed in klish, rest-patch,rest-put') each_neigh = each_neigh.lstrip('*') match = {'neighbor': each_neigh} try: - entries = filter_and_select(output, None, match)[0] + entries = filter_and_select(output, None, match) + if not entries: + st.debug("Entries : {}".format(entries)) + return False + entries = entries[0] except Exception as e: st.error(e) st.log("Neighbor {} given state {}, matching with {} ".format(each_neigh, kwargs['state'], @@ -2196,6 +3431,98 @@ def config_address_family_redistribute(dut, local_asn, mode_type, mode, value, c cmd = cmd + 'exit\nexit\n' st.config(dut, cmd, type=cli_type, skip_error_check=skip_error_check, conf = True) return True + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + + #vrf_name1="" + vrf_name1=vrf.lower() + vrf_name1 = 'default' if vrf_name1 !=vrf.lower() else vrf_name1 + family=mode_type + if family == "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + elif family == "l2vpn": + afi_safi_name = "L2VPN_EVPN" + else: + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + 
data_asn=dict() + data_asn["openconfig-network-instance:bgp"] = dict() + data_asn["openconfig-network-instance:bgp"]["global"] = dict() + data_asn["openconfig-network-instance:bgp"]["global"]["config"] = dict() + data_asn["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(local_asn) + data_asn["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + data_asn["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + afi_data=dict() + afi_data["config"]=dict() + afi_data.update({"afi-safi-name": afi_safi_name}) + afi_data["config"].update({"afi-safi-name": afi_safi_name}) + data_asn["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) + url=rest_urls['bgp_config'].format(vrf_name1) + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=data_asn): + st.error("failed to config local as") + if value: + if value == "connected": + redist_type = value.upper() + if redist_type == 'CONNECTED': redist_type = 'DIRECTLY_CONNECTED' + elif value == "static": + redist_type = value.upper() + elif value == "ospf": + redist_type = value.upper() + else: + st.log("invalid redist type") + + if cfgmode != "no": + if route_map: + url = rest_urls["bgp_route_redistribute"].format(vrf_name1) + data = { "openconfig-network-instance:table-connections": {"table-connection": [{ + "src-protocol": redist_type, + "dst-protocol": "BGP", + "address-family": family.upper(), + "config": { + "src-protocol": redist_type, + "address-family": family.upper(), + "dst-protocol": "BGP", + "import-policy": [ + route_map + ]}}]}, "protocols": {"protocol": [{ + "identifier": "BGP", + "name": "bgp", + "config": { + "identifier": "BGP", + "name": "bgp", + "enabled": True, + } + }]}} + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=data): + st.error("redistribute config failed") + + else: + url = rest_urls["bgp_route_redistribute"].format(vrf_name1) + data = 
{"openconfig-network-instance:table-connections": {"table-connection": [{ + "src-protocol": redist_type, + "dst-protocol": "BGP", + "address-family": family.upper(), + "config": { + "src-protocol": redist_type, + "address-family": family.upper(), + "dst-protocol": "BGP"}}]}, "protocols": {"protocol": [{ + "identifier": "BGP", + "name": "bgp", + "config": { + "identifier": "BGP", + "name": "bgp" + + } + }]}} + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=data): + st.error("redistribute config failed") + return False + + else: + url = st.get_datastore(dut,"rest_urls")['bgp_del_redist'].format(vrf_name1,redist_type,family.upper()) + if not delete_rest(dut, rest_url=url): + st.error("redistribute delete config failed") + return False + return True else: st.log("Unsupported CLI TYPE - {}".format(cli_type)) return False @@ -2266,6 +3593,7 @@ def config_bgp(dut, **kwargs): removeBGP = kwargs.get('removeBGP', 'no') diRection = kwargs.get('diRection', 'in') weight = kwargs.get('weight', None) + allowas_in = kwargs.get("allowas_in",None) config_cmd = "" if config.lower() == 'yes' else "no" my_cmd ='' if cli_type == "vtysh": @@ -2431,7 +3759,7 @@ def config_bgp(dut, **kwargs): no_neighbor = "no" if kwargs.get("config") == "no" else "" sub_list = ["neighbor", "routeMap", "shutdown", "activate", "nexthop_self", "pswd", "update_src", "bfd", "default_originate", "removePrivateAs", "no_neigh","remote-as","filter_list", - "prefix_list", "distribute_list", "weight", "keepalive", "holdtime", "ebgp_mhop","peergroup","update_src_intf","connect"] + "prefix_list", "distribute_list", "weight", "keepalive", "holdtime", "ebgp_mhop","peergroup","update_src_intf","connect","allowas_in"] if 'local_as' in kwargs and removeBGP != 'yes': if vrf_name != 'default': @@ -2556,7 +3884,7 @@ def config_bgp(dut, **kwargs): elif removePrivateAs: my_cmd = 'address-family {} unicast'.format(addr_family) commands.append(my_cmd) - my_cmd = '{} remove-private-AS'.format(config_cmd) + 
my_cmd = '{} remove-private-as'.format(config_cmd) commands.append(my_cmd) commands.append("exit") removePrivateAs = False @@ -2629,6 +3957,13 @@ def config_bgp(dut, **kwargs): my_cmd = '{} timers connect {}'.format(config_cmd, connect) commands.append(my_cmd) connect = None + elif allowas_in: + my_cmd = 'address-family {} unicast'.format(addr_family) + commands.append(my_cmd) + my_cmd = '{} allowas-in {}'.format(config_cmd, allowas_in) + commands.append(my_cmd) + commands.append("exit") + allowas_in = None st.log('config_bgp command_list: {}'.format(commands)) #come back to router bgp context @@ -2695,7 +4030,7 @@ def config_bgp(dut, **kwargs): elif type1 == 'peer_group': st.log("Configuring the peer_group on the device") else: - st.log('Invalid BGP config parameter') + st.log('Invalid BGP config parameter {}'.format(type1)) if config_cmd == 'no' and 'neighbor' in config_type_list and neigh_name and not peergroup: if isinstance(neigh_name, dict): my_cmd = "{} neighbor interface {} {}".format(config_cmd, neigh_name["type"],neigh_name["number"]) @@ -2728,51 +4063,74 @@ def config_bgp(dut, **kwargs): route_map = True if "routeMap" in config_type_list else False default_originate = True if "default_originate" in config_type_list else False removePrivateAs = True if "removePrivateAs" in config_type_list else False - #no_neighbor = "no" if kwargs.get("config") == "no" else "" + no_neighbor = "no" if kwargs.get("config") == "no" else "" sub_list = ["neighbor", "routeMap", "shutdown", "activate", "nexthop_self", "pswd", "update_src", "bfd", "default_originate", "removePrivateAs", "no_neigh", "remote-as", "filter_list", "prefix_list", "distribute_list", "weight", "keepalive", "holdtime", "ebgp_mhop", "peergroup", - "update_src_intf", "connect"] - bgp_data = dict() - bgp_data["openconfig-network-instance:bgp"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() - 
bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() - bgp_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["confederation"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["dynamic-neighbor-prefixes"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["dynamic-neighbor-prefixes"][ - "dynamic-neighbor-prefixe"] = list() - bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:bgp-ext-route-reflector"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:global-defaults"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:update-delay"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["openconfig-bgp-ext:max-med"] = dict() - bgp_data["openconfig-network-instance:bgp"]["neighbors"] = dict() - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() - bgp_data["openconfig-network-instance:bgp"]["peer-groups"] = dict() - bgp_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() - delete_urls = [] # All the delete URLS should be appended to this list - # neigh_name = get_interface_number_from_name(neighbor) - family = kwargs.get('family', None) + "update_src_intf", "connect","redist"] + #bgp_data = dict() + global_data = dict() + neigh_data = dict() + peer_data = dict() + common_data = dict() + + global_data["openconfig-network-instance:bgp"] = dict() + global_data["openconfig-network-instance:bgp"]["global"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["config"] = dict() + + open_data = dict() + open_data["openconfig-network-instance:table-connections"] = dict() + 
open_data["openconfig-network-instance:table-connections"]["table-connection"] = list() + + neigh_data = dict() + + common_data = dict() + + open_data = dict() + open_data["openconfig-network-instance:table-connections"] = dict() + open_data["openconfig-network-instance:table-connections"]["table-connection"] = list() + if neighbor: + neigh_data = dict() + global_data["openconfig-network-instance:bgp"]["neighbors"] = dict() + global_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"] = list() + neigh_data.update({"neighbor-address": neighbor}) + neigh_data["config"] = dict() + if peergroup: + peer_data = dict() + global_data["openconfig-network-instance:bgp"]["peer-groups"] = dict() + global_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"] = list() + peer_data.update({'peer-group-address': peergroup}) + peer_data['config'] = dict() + + family = kwargs.get('addr_family', "ipv4") if family == "ipv4": afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + elif addr_family == "l2vpn": + afi_safi_name = "L2VPN_EVPN" else: afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + if 'local_as' in kwargs and removeBGP != 'yes': - bgp_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = kwargs.get("local_as") + global_data["openconfig-network-instance:bgp"]["global"]["config"]["as"] = int(kwargs.get("local_as")) if router_id: - bgp_data["openconfig-network-instance:bgp"]["global"]["config"]["router-id"] = router_id + if config_cmd != "no": + global_data["openconfig-network-instance:bgp"]["global"]["config"]["router-id"] = router_id + else: + url=st.get_datastore(dut,"rest_urls")["bgp_del_rid"].format(vrf_name) + if not delete_rest(dut,rest_url=url): + st.error("failed to unconfig router-id") if peergroup: - # print(bgp_data) - peer_data = dict() - peer_data.update({'peer-group-address': peergroup}) - bgp_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) - peer_data['config'] = dict() - 
peer_data["config"].update({'peer-group-address': peergroup}) + if config_cmd != 'no': + peer_data.update({'peer-group-address': peergroup}) + + peer_data['config'] = dict() + peer_data["config"].update({'peer-group-address': peergroup}) + else: + url=st.get_datastore(dut,"rest_urls")['bgp_del_peer_group'].format(vrf_name,peergroup) + if not delete_rest(dut,rest_url=url): + st.error("failed to unconfig peergroup") config_default_activate = True config_remote_as = True @@ -2781,357 +4139,845 @@ def config_bgp(dut, **kwargs): for type1 in config_type_list: if type1 in sub_list: if neighbor and not peergroup: - neigh_data = dict() + neigh_data.update({"neighbor-address": neighbor}) neigh_data["config"] = dict() neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + if peergroup: - peer_data = dict() - peer_data.update({'peer-group-address': peergroup}) - bgp_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) - peer_data['config'] = dict() - peer_data["config"].update({'peer-group-address': peergroup}) - if 'peergroup' in config_type_list: - # if isinstance(neigh_name, dict): - if activate and config_cmd == "no": - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - # delete_rest(dut, rest_url=url) - if not delete_rest(dut, rest_url=url): - st.error("neighbor is failed") + if config_cmd != 'no': + peer_data = dict() + peer_data.update({'peer-group-address': peergroup}) + + peer_data['config'] = dict() + peer_data["config"].update({'peer-group-address': peergroup}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_peer_group'].format(vrf_name,peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete peer group") + + if 'peergroup' in config_type_list: + + if activate and no_neighbor == "no": + 
url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] + url=url.format(vrf_name, neighbor) + if not delete_rest(dut, rest_url=url): + st.error("neighbor delete is failed") + else: - neigh_data = dict() + neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() + neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - if neighbor: - neigh_data1 = dict() - neigh_data1["afi-safis"] = dict() - neigh_data1["afi-safis"]["afi-safi"] = list() - neigh_data1_sub = dict() - neigh_data1_sub.update({"afi-safi-name": afi_safi_name}) - neigh_data1_sub["config"] = dict() - neigh_data1_sub["config"].update({"afi-safi-name": "afi-safi-name", "enabled": True}) - neigh_data1.update(neigh_data1_sub) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data1) if config_remote_as and remote_as: if interface and not peergroup: - neigh_data = dict() + neigh_data.update({"neighbor-address": neighbor}) neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor, "peer-type": remote_as}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - config_remote_as = False + neigh_data["config"].update({"neighbor-address": neighbor}) + + if config_cmd != 'no': + + neigh_data.update({"neighbor-address": neighbor}) + + if str(remote_as).isdigit(): #peer_as = remote_as else peer_type = remote_as + neigh_data["config"].update({"neighbor-address": neighbor, "peer-as": int(remote_as)}) + else: + if remote_as == "internal": + peer_type= "INTERNAL" + else: + peer_type="EXTERNAL" + neigh_data["config"].update({"neighbor-address": neighbor, "peer-type": peer_type}) + + else: + url=st.get_datastore(dut,"rest_urls")['bgp_del_remote_as'] + url=url.format(vrf_name,neighbor) + if not delete_rest(dut,rest_url=url): + st.error("failed to delete remote-as") + + #config_remote_as = False 
if config_default_activate and (activate or neighbor): if config_cmd == "": - family = kwargs.get('family', None) - if family == "ipv4": - afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" - else: + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name, "enabled": True}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name,"enabled":True}) + + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + neigh_data_sub1 = dict() + neigh_data_sub1.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub1["config"] = dict() + neigh_data_sub1["config"].update({"afi-safi-name": afi_safi_name, "enabled": True}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub1) elif activate and config_cmd == "no": - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - activate = None + + family = kwargs.get('addr_family', 'ipv4') + if family != "ipv4": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + common_data["afi-safis"] = dict() + 
common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name, "enabled": False}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #activate = None if shutdown: - neigh_data["config"].update({"enabled": False}) - shutdown = None + common_data["config"] = dict() + if config_cmd != 'no': + common_data["config"].update({"enabled": True}) + else: + common_data["config"].update({"enabled": False}) + #shutdown = None elif route_map: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - neigh_data["afi-safis"] = dict() - neigh_data["afi-safis"]["afi-safi"] = list() + + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + if not common_data: + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() neigh_data_sub = dict() - neigh_data_sub.update({"afi-safi-name": "afi-safi-name"}) + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) neigh_data_sub["config"] = dict() - neigh_data_sub["config"].update({"afi-safi-name": "afi-safi-name"}) - neigh_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) neigh_data_sub["apply-policy"] = dict() neigh_data_sub["apply-policy"]["config"] = dict() - neigh_data_sub["apply-policy"]["config"].update({"import-policy": ["route-map"]}) - route_map = False + if config_cmd != 'no': + if diRection == 'in': + neigh_data_sub["apply-policy"]["config"].update({"import-policy": [routeMap]}) + else: + neigh_data_sub["apply-policy"]["config"].update({"export-policy": [routeMap]}) + else: + if diRection == 
'in': + url=st.get_datastore(dut,"rest_urls")['bgp_del_route_map_in'] + url=url.format(vrf_name,neighbor,afi_safi_name[21:],routeMap) + if not delete_rest(dut,rest_url=url): + st.error("failed to delete route-map inboud") + + else: + url = st.get_datastore(dut,"rest_urls")['bgp_del_route_map_out'] + url = url.format(vrf_name, neighbor, afi_safi_name[21:], routeMap) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete route-map outbound") + + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #route_map = False elif filter_list: + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + if not common_data: + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) neigh_data_sub["openconfig-bgp-ext:filter-list"] = dict() neigh_data_sub["openconfig-bgp-ext:filter-list"]["config"] = dict() - neigh_data_sub["openconfig-bgp-ext:filter-list"]["config"].update({"import-policy": "filter-list"}) - filter_list = None + if config_cmd != 'no': + if diRection == 'in': + neigh_data_sub["openconfig-bgp-ext:filter-list"]["config"].update( + {"import-policy": filter_list}) + else: + neigh_data_sub["openconfig-bgp-ext:filter-list"]["config"].update( + {"export-policy": filter_list}) + else: + if diRection == 'in': + url = st.get_datastore(dut,"rest_urls")['bgp_del_filter_list_in'] + url = url.format(vrf_name, neighbor, afi_safi_name[21:]) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete filter-list inboud") + + else: + url = st.get_datastore(dut,"rest_urls")['bgp_del_filter_list_out'] + url = url.format(vrf_name, neighbor, afi_safi_name[21:]) + if not delete_rest(dut, rest_url=url): + st.error("failed 
to delete filter-list outbound") + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #filter_list = None elif prefix_list: + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + if not common_data: + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + + neigh_data_sub = dict() + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["openconfig-bgp-ext:prefix-list"] = dict() neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"] = dict() - neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update({"import-policy": "prefix-list"}) - prefix_list = None + if config_cmd != 'no': + if diRection == 'in': + neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update( + {"import-policy": prefix_list}) + else: + neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update( + {"export-policy": prefix_list}) + else: + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name_del = "IPV6_UNICAST" + else: + afi_safi_name_del = "IPV4_UNICAST" + if diRection == 'in': + url = st.get_datastore(dut, "rest_urls")['bgp_del_prefix_list_in'] + url = url.format(vrf_name, neighbor, afi_safi_name_del) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete prefix-list inboud") + return False + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_prefix_list_out'] + url = url.format(vrf_name, neighbor, afi_safi_name_del) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete prefix-list outbound") + + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #prefix_list = None elif distribute_list: + + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + 
neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) neigh_data_sub["openconfig-bgp-ext:prefix-list"] = dict() neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"] = dict() - neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update({"import-policy": "prefix-list"}) - distribute_list = None + if config_cmd != 'no': + if diRection == 'in': + neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update( + {"import-policy": prefix_list}) + else: + neigh_data_sub["openconfig-bgp-ext:prefix-list"]["config"].update( + {"export-policy": prefix_list}) + else: + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name_del = "IPV6_UNICAST" + else: + afi_safi_name_del = "IPV4_UNICAST" + if diRection == 'in': + url = st.get_datastore(dut, "rest_urls")['bgp_del_prefix_list_in'] + url = url.format(vrf_name, neighbor, afi_safi_name_del,prefix_list) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete ditribute-list inboud") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_prefix_list_out'] + url = url.format(vrf_name, neighbor, afi_safi_name_del,prefix_list) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete distribute-list outbound") + + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #prefix_list = None elif default_originate: - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data["ipv4-unicast"] = dict() - afi_data["ipv4-unicast"]["config"] = dict() - afi_data.update({"afi-safi-name": afi_safi_name, "enabled": True}) + #family = kwargs.get('addr_family', "ipv4") + #if family == "ipv6": + #afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + #else: + #afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + if not 
common_data: + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + neigh_data_sub["ipv4-unicast"] = dict() + neigh_data_sub["ipv4-unicast"]["config"] = dict() + if 'routeMap' in kwargs: - afi_data["ipv4-unicast"]["config"].update({"send-default-route": True, "openconfig-bgp-ext:default-policy-name": "routeMap"}) + if config_cmd != 'no': + neigh_data_sub["ipv4-unicast"]["config"].update({"send-default-route": True, "openconfig-bgp-ext:default-policy-name": routeMap}) + + else: + neigh_data_sub["ipv4-unicast"]["config"].update({"send-default-route": False}) else: - afi_data["ipv4-unicast"]["config"].update({"send-default-route": True}) - default_originate = False + neigh_data_sub["ipv4-unicast"]["config"].update({"send-default-route": True}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #default_originate = False elif removePrivateAs: + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() + neigh_data_sub = dict() + neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) neigh_data_sub["openconfig-bgp-ext:remove-private-as"] = dict() neigh_data_sub["openconfig-bgp-ext:remove-private-as"]["config"] = dict() - neigh_data_sub["openconfig-bgp-ext:remove-private-as"]["config"].update({"enabled": True}) - removePrivateAs = False + if config_cmd != "no": + neigh_data_sub["openconfig-bgp-ext:remove-private-as"]["config"].update({"enabled": True}) + else: + neigh_data_sub["openconfig-bgp-ext:remove-private-as"]["config"].update({"enabled": False}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + #removePrivateAs = False elif weight: - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - 
afi_data.update({"afi-safi-name": afi_safi_name, "enabled": True}) - neigh_data = dict() - neigh_data["afi-safis"] = dict() - neigh_data["afi-safis"]["afi-safi"] = list() + + family = kwargs.get('addr_family', "ipv4") + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() neigh_data_sub = dict() neigh_data_sub.update({"afi-safi-name": afi_safi_name}) - neigh_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + neigh_data_sub['config'] = dict() - neigh_data_sub['config'].update( - {"afi-safi-name": afi_safi_name, "enabled": True, "openconfig-bgp-ext:weight": 0}) - neigh_data["afi-safis"]["afi-safi"].append(neigh_data_sub) - bgp_data["openconfig-network-instance:bgp"]["global"]["neighbors"]["neighbor"].append(neigh_data) - weight = None + if config_cmd!='no': + neigh_data_sub['config'].update({"afi-safi-name": afi_safi_name, "enabled": True, "openconfig-bgp-ext:weight": int(weight)}) + else: + if neighbor and not peergroup: + url=st.get_datastore(dut,"rest_urls")['bgp_del_weight'] + url=url.format(vrf_name,neighbor,family) + if not delete_rest(dut,rest_url=url): + st.error("failed to delete weight") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_weight_peer'] + url = url.format(vrf_name, peergroup, family) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete weight") + + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + + #weight = None elif keepalive and holdtime: - if isinstance(neighbor, dict): - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") + family=kwargs.get("addr_family",'ipv4') + if neighbor: + if no_neighbor == "no": + url = 
st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'].format("default",neighbor) + + if not delete_rest(dut, rest_url=url): + st.error("neighbor delete is failed") + + else: + neigh_data.update({"neighbor-address":neighbor}) + neigh_data["config"].update({"neighbor-address": neighbor}) + + common_data["timers"] = dict() + common_data["timers"]["config"] = dict() + + if config_cmd != 'no': + common_data["timers"]["config"].update({"hold-time": str(holdtime), "keepalive-interval": str(keepalive)}) else: - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - neigh_data["timers"] = dict() - neigh_data["timers"]["config"] = dict() - neigh_data["timers"]["config"].update({"hold-time": holdtime, "keepalive-interval": keepalive}) - keepalive = 0 - holdtime = 0 + if neighbor and not peergroup: + url = st.get_datastore(dut,"rest_urls")['bgp_del_timers'] + url=url.format(vrf_name,neighbor,family) + if not delete_rest(dut,rest_url=url): + st.error("failed to delete timers") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_timers_peer'] + url = url.format(vrf_name, peergroup,family) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete timers") + + #keepalive = 0 + #holdtime = 0 elif nexthop_self: - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name, "enabled": True}) - neigh_data = dict() - neigh_data.update({"neighbor-address": "string"}) - neigh_data["afi-safis"] = dict() - neigh_data["afi-safis"]["afi-safi"] = list() + + common_data["afi-safis"] = dict() + common_data["afi-safis"]["afi-safi"] = list() neigh_data_sub = dict() 
neigh_data_sub.update({"afi-safi-name": afi_safi_name}) + neigh_data_sub["config"] = dict() + neigh_data_sub["config"].update({"afi-safi-name": afi_safi_name}) neigh_data_sub["openconfig-bgp-ext:next-hop-self"] = dict() neigh_data_sub["openconfig-bgp-ext:next-hop-self"]["config"] = dict() - neigh_data_sub["openconfig-bgp-ext:next-hop-self"]["config"].update( - {"enabled": True, "force": True}) - neigh_data["afi-safis"]["afi-safi"].append(neigh_data_sub) - bgp_data["openconfig-network-instance:bgp"]["global"]["neighbors"]["neighbor"].append(neigh_data) - nexthop_self = None + if config_cmd !='no': + neigh_data_sub["openconfig-bgp-ext:next-hop-self"]["config"].update( + {"enabled": True}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + else: + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_nexthop_self'] + url = url.format(vrf_name,neighbor,family) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete nexthop self") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_nexthop_self_peer'] + url = url.format(vrf_name, peergroup,family) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete nexthop self") + + #nexthop_self = None elif pswd: password = "" if config_cmd == 'no' else password - if password: + #neigh_data_sub = dict() + if config_cmd != 'no': neigh_data["openconfig-bgp-ext:auth-password"] = dict() neigh_data["openconfig-bgp-ext:auth-password"]["config"] = dict() neigh_data["openconfig-bgp-ext:auth-password"]["config"].update({"password": password}) + else: - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - pswd = False + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_pwd'] + url = url.format(vrf_name, neighbor) + 
if not delete_rest(dut, rest_url=url): + st.error("failed to delete pwd") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_pwd'] + url = url.format(vrf_name, peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete pwd") + + + #pswd = False elif update_src: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - update_src = None + if neighbor: + if no_neighbor == 'no': + url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'].format("default",neighbor) + + if not delete_rest(dut, rest_url=url.format("default", neighbor)): + st.error("neighbor is failed") + + else: + neigh_data.update({"neighbor-address": neighbor}) + neigh_data["config"].update({"neighbor-address": neighbor}) + common_data.update(neigh_data) + neigh_data_sub = dict() + neigh_data_sub["transport"]=dict() + neigh_data_sub["transport"]["config"] = dict() + if config_cmd!='no': + neigh_data_sub["transport"]["config"].update({"local-address": update_src}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) + else: + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_update_src'] + url = url.format(vrf_name, neighbor) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete update_src") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_update_src_peer'] + url = url.format(vrf_name, peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete update_src") + + #update_src = None elif update_src_intf: if neighbor: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + if 
no_neighbor == 'no': + url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'].format("default", neighbor) + + if not delete_rest(dut, rest_url=url.format("default", neighbor)): + st.error("neighbor is failed") + + else: + neigh_data.update({"neighbor-address": neighbor}) + neigh_data["config"].update({"neighbor-address": neighbor}) + + neigh_data_sub = dict() + neigh_data_sub["transport"] = dict() + neigh_data_sub["transport"]["config"] = dict() + if config_cmd != 'no': + neigh_data_sub["transport"]["config"].update({"local-address": update_src}) + common_data["afi-safis"]["afi-safi"].append(neigh_data_sub) else: - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - if update_src_intf: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - neigh_data["transport"] = dict() - neigh_data["transport"]["config"] = dict() - neigh_data.update({"local-address": update_src_intf}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - update_src_intf = None + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_update_src'] + url = url.format(vrf_name, neighbor) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete update_src") + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_update_src_peer'] + url = url.format(vrf_name, peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete update_src") + + #update_src_intf = None elif ebgp_mhop: if neighbor: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - 
bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + if no_neighbor == 'no': + url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'].format("default",neighbor) + + if not delete_rest(dut, rest_url=url.format("default", neighbor)): + st.error("neighbor is failed") + + else: + neigh_data.update({"neighbor-address": neighbor}) + neigh_data["config"].update({"neighbor-address": neighbor}) + + neigh_data_sub = dict() + neigh_data_sub["ebgp-multihop"] = dict() + neigh_data_sub["ebgp-multihop"]["config"] = dict() + if config_cmd != 'no': + neigh_data_sub["ebgp-multihop"]["config"].update({"enabled": True,"multihop-ttl": int(ebgp_mhop) }) + + neigh_data.update(neigh_data_sub) else: - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - delete_urls.append(url.format("default", neighbor)) - if delete_urls: - for url in delete_urls: - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - neigh_data["ebgp-multihop"] = dict() - neigh_data["ebgp-multihop"]["config"] = dict() - neigh_data.update({"multihop-ttl": ebgp_mhop}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - ebgp_mhop = None + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_ebgp_mhop'] + url = url.format(vrf_name, neighbor) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete ebgp mhop") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_ebgp_mhop_peer'] + url = url.format(vrf_name, peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete ebgp mhop") + + #ebgp_mhop = None elif bfd: - if interface and remote_as: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = 
dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - elif neighbor and not interface and remote_as: - neigh_data = dict() - neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() - neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - neigh_data["config"].update({"neighbor-address": neighbor, "peer-type": remote_as}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - neigh_data["openconfig-bfd:enable-bfd"] = dict() - neigh_data["openconfig-bfd:enable-bfd"]["config"] = dict() - neigh_data.update({"enabled": True}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - bfd = False + if (neighbor or interface) and remote_as: + + st.log("interface:") + st.log(interface) + st.log("neighbor:") + st.log(neighbor) + if neighbor: + + neigh_data.update({"neighbor-address": neighbor}) + neigh_data["config"] = dict() + neigh_data["config"].update({"neighbor-address": neighbor}) + if str(remote_as).isdigit(): + neigh_data["config"].update({"neighbor-address": neighbor, "peer-as": int(remote_as)}) + else: + if remote_as == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + neigh_data["config"].update({"neighbor-address": neighbor, "peer-type": peer_type}) + else: + neigh_data.update({"neighbor-address": interface}) + neigh_data["config"] = dict() + neigh_data["config"].update({"neighbor-address": interface}) + if str(remote_as).isdigit(): + neigh_data["config"].update({"neighbor-address": interface, "peer-as": int(remote_as)}) + else: + if remote_as == "internal": + peer_type = "INTERNAL" + else: + peer_type = "EXTERNAL" + neigh_data["config"].update({"neighbor-address": interface, "peer-type": peer_type}) + + + neigh_data_sub = dict() + 
neigh_data_sub["openconfig-bfd:enable-bfd"] = dict() + neigh_data_sub["openconfig-bfd:enable-bfd"]["config"] = dict() + if config_cmd != 'no': + neigh_data_sub["openconfig-bfd:enable-bfd"]["config"].update({"enabled": True}) + else: + url=st.get_datastore(dut,"rest_urls")['bgp_del_bfd'] + url=url.format(vrf_name,neighbor) + if not delete_rest(dut,rest_url=url): + st.error("failed to disable bfd") + + neigh_data.update(neigh_data_sub) + global_data["openconfig-network-instance:bgp"]["neighbors"]=dict() + global_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"]=list() + global_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + #bfd = False elif connect: - peer_data = dict() - peer_data.update({'peer-group-address': peergroup}) - bgp_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) - peer_data['timers'] = dict() - peer_data['timers']["config"] = dict() - peer_data.update({"connect-retry": 10}) - connect = None + common_data["timers"] = dict() + common_data["timers"]["config"] = dict() + if config_cmd != 'no': + common_data["timers"]["config"].update({"connect-retry": str(connect)}) + else: + if neighbor and not peergroup: + url = st.get_datastore(dut, "rest_urls")['bgp_del_connect'] + url = url.format(vrf_name, neighbor) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete connect") + + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_connect_peer'] + url = url.format(vrf_name, peergroup) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete connect") + + #connect = None elif type1 == 'fast_external_failover': - bgp_data["openconfig-network-instance:bgp"]["global"]["config"].update( - {"openconfig-bgp-ext:fast-external-failover": True}) + st.log("Configuring the fast_external_failover") + + if config_cmd!='no': + global_data["openconfig-network-instance:bgp"]["global"]["config"].update({"openconfig-bgp-ext:fast-external-failover": True}) + 
else: + global_data["openconfig-network-instance:bgp"]["global"]["config"][config].update({"openconfig-bgp-ext:fast-external-failover": False}) elif type1 == 'bgp_bestpath_selection': - bgp_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update( - {"external-compare-router-id": True}) + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]=dict() + global_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]=dict() + global_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]["config"] = dict() + if 'as-path confed' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"openconfig-bgp-ext:compare-confed-as-path": True}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_as_path_confed'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig as-path confed") + + if 'as-path ignore' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"openconfig-network-instance:ignore-as-path-length": True}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_as_path_ig'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig as-path ignore") + + if 'as-path multipath-relax' in bgp_bestpath_selection: + if config_cmd != 'no': + 
global_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]["config"].update( + {"allow-multiple-as": True, "openconfig-bgp-ext:as-set": False}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_as_path_multipath_relax'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig as-path multipath-relax") + + if 'as-path multipath-relax as-set' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"]["config"].update({"allow-multiple-as": True, "openconfig-bgp-ext:as-set": True}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_as_path_multipath_relax_as'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig as-path multipath-relax as-set") + + if 'compare-routerid' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"external-compare-router-id": True}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_comp_rid'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig as-path compare-routerid") + + if 'med confed' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"openconfig-bgp-ext:med-missing-as-worst": False}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_med'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig med confed") + + if 'med confed missing-as-worst' in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"openconfig-bgp-ext:med-missing-as-worst": True}) + else: + url = st.get_datastore(dut, 
"rest_urls")['bgp_del_bp_med'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig med confed") + + if ('med missing-as-worst confed' or 'med missing-as-worst') in bgp_bestpath_selection: + if config_cmd != 'no': + global_data["openconfig-network-instance:bgp"]["global"]["route-selection-options"]["config"].update({"openconfig-bgp-ext:med-missing-as-worst": True}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_med'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig med confed") + elif type1 == 'max_path_ibgp': - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name, "enabled": True}) - afi_data["use-multiple-paths"] = dict() - afi_data["use-multiple-paths"]["ibgp"] = dict() - afi_data["use-multiple-paths"]["ibgp"]["config"] = dict() - afi_data["use-multiple-paths"]["ibgp"]["config"].update({"maximum-paths": type1}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) + sub_data = dict() + sub_data["config"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + sub_data.update({"afi-safi-name":afi_safi_name}) + sub_data["config"].update({"afi-safi-name":afi_safi_name}) + sub_data["use-multiple-paths"] = dict() + sub_data["use-multiple-paths"]["ibgp"] = dict() + sub_data["use-multiple-paths"]["ibgp"]["config"] = dict() + if config_cmd != 'no': + sub_data["use-multiple-paths"]["ibpg"]["config"].update({"maximum-paths": max_path_ibgp}) + else: + url = 
st.get_datastore(dut, "rest_urls")['bgp_del_max_path_ibgp'] + url = url.format(vrf_name, family) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig max paths ibgp") + + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(sub_data) elif type1 == 'max_path_ebgp': - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": 'afi-safi-name'}) - if config_cmd == '' or config_cmd == 'yes': - afi_data["use-multiple-paths"] = dict() - afi_data["use-multiple-paths"]["ebgp"] = dict() - afi_data["use-multiple-paths"]["ebgp"]["config"] = dict() - afi_data["use-multiple-paths"]["ebgp"]["config"].update({"maximum-paths": type1}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) + sub_data = dict() + sub_data["config"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + sub_data.update({"afi-safi-name":afi_safi_name}) + sub_data["config"].update({"afi-safi-name":afi_safi_name}) + sub_data["use-multiple-paths"] = dict() + sub_data["use-multiple-paths"]["ebgp"] = dict() + sub_data["use-multiple-paths"]["ebgp"]["config"] = dict() + if config_cmd != 'no': + sub_data["use-multiple-paths"]["ebgp"]["config"].update({"maximum-paths": max_path_ebgp}) else: - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"] = dict() - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"][ - "config"] = dict() - 
bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"]["ebgp"][ - "config"].update({"allow-multiple-as": True}) + url = st.get_datastore(dut, "rest_urls")['bgp_del_max_path_ebgp'] + url = url.format(vrf_name, family) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig max paths ebgp") + + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(sub_data) elif type1 == 'redist': - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - # my_cmd = '{} redistribute {}'.format(config_cmd, redistribute) # SW defect is there + if redistribute=="connected": + sub_data = dict() + sub_data["config"] = dict() + redist_type = 'connected' + redist_type = redist_type.upper() + if redist_type == 'CONNECTED': redist_type = 'DIRECTLY_CONNECTED' + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_connected'] + url = url.format(vrf_name, redist_type) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute connected") + + + elif redistribute == 'static': + sub_data = dict() + sub_data["config"] = dict() + redist_type = 'static' + redist_type = redist_type.upper() + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_static'] + url = url.format(vrf_name, 
redist_type) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute static") + + + elif redistribute == "ospf": + sub_data = dict() + sub_data["config"] = dict() + redist_type = 'ospf' + redist_type = redist_type.upper() + if config_cmd != 'no': + sub_data.update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + sub_data["config"].update( + {"dst-protocol": "BGP", "address-family": family.upper(), "src-protocol": redist_type}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_redist_ospf'] + url = url.format(vrf_name, redist_type) + if not delete_rest(dut, rest_url=url): + st.error("failed to do unconfig redistribute ospf") + + else: + st.error("unsupported commands") + sub_data1 = dict() + sub_data1["config"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + sub_data1.update({"afi-safi-name": afi_safi_name}) + sub_data1["config"].update({"afi-safi-name": afi_safi_name}) + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(sub_data1) + + open_data["openconfig-network-instance:table-connections"]["table-connection"].append(sub_data) + elif type1 == 'network': - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - afi_data["openconfig-bgp-ext:network-config"] = dict() - afi_data["openconfig-bgp-ext:network-config"]["network"] = list() - obe_data = dict() - obe_data.update({"prefix": "string"}) - obe_data["config"] = dict() - obe_data.update({"prefix": "network"}) - 
afi_data["openconfig-bgp-ext:network-config"]["network"].append(obe_data) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) + sub_data = dict() + sub_data["config"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + sub_data.update({"afi-safi-name":afi_safi_name}) + sub_data["config"].update({"afi-safi-name":afi_safi_name}) + if config_cmd != 'no': + sub_data["openconfig-bgp-ext:network-config"] = dict() + sub_data["openconfig-bgp-ext:network-config"]["network"] = list() + sub_data_net = dict() + sub_data_net["config"] = dict() + sub_data_net.update({"prefix": network}) + sub_data_net["config"].update({"prefix": network}) + sub_data["openconfig-bgp-ext:network-config"]["network"].append(sub_data_net) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_network'] + url = url.format(vrf_name, afi_safi_name, network) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig network") + + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(sub_data) elif type1 == 'import-check': - bgp_data["openconfig-network-instance:bgp"]["global"]["config"].update({"openconfig-bgp-ext:network-import-check": True}) + global_sub_data = dict() + if config_cmd != 'no': + global_sub_data.update({"openconfig-bgp-ext:network-import-check": True}) + else: + global_sub_data.update({"openconfig-bgp-ext:network-import-check": False}) + global_data["openconfig-network-instance:bgp"]["config"].update(global_sub_data) elif type1 == 'import_vrf': - afi_data = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - bgp_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(afi_data) - 
afi_data['config'] = dict() - afi_data.update({"afi-safi-name": afi_safi_name}) - afi_data["openconfig-bgp-ext:import-network-instance"] = dict() - afi_data["openconfig-bgp-ext:import-network-instance"]["config"] = dict() - afi_data["openconfig-bgp-ext:import-network-instance"]["config"].update({"name": "import_vrf"}) + sub_data = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"] = dict() + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"] = list() + if family == "ipv6": + afi_safi_name = "openconfig-bgp-types:IPV6_UNICAST" + else: + afi_safi_name = "openconfig-bgp-types:IPV4_UNICAST" + sub_data.update({"afi-safi-name":afi_safi_name}) + sub_data["config"].update({"afi-safi-name": afi_safi_name}) + if config_cmd != 'no': + sub_data["openconfig-bgp-ext:import-network-instance"] = dict() + sub_data["openconfig-bgp-ext:import-network-instance"]["config"] = dict() + sub_data["openconfig-bgp-ext:import-network-instance"]["config"].update({"name": vrf_name}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_vrf'] + url = url.format(vrf_name, afi_safi_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to delete vrf") + + global_data["openconfig-network-instance:bgp"]["global"]["afi-safis"]["afi-safi"].append(sub_data) elif type1 == 'multipath-relax': - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"] = dict() - ump_data = dict() - ump_data["ebpg"] = dict() - ump_data["ebpg"]["config"] = dict() - ump_data["ebpg"]["config"].update({"allow-multiple-as": True}) - bgp_data["openconfig-network-instance:bgp"]["global"]["use-multiple-paths"].update(ump_data) + if config_cmd != 'no': + global_data["config"].update({"allow-multiple-as": True, "openconfig-bgp-ext:as-set": False}) + else: + url = st.get_datastore(dut, "rest_urls")['bgp_del_bp_as_path_multipath_relax'] + url = url.format(vrf_name) + if not delete_rest(dut, rest_url=url): + st.error("failed to unconfig 
as-path multipath-relax") + elif type1 == 'removeBGP': st.log("Removing the bgp config from the device") elif type1 == 'router_id': @@ -3139,34 +4985,50 @@ def config_bgp(dut, **kwargs): elif type1 == 'peer_group': st.log("Configuring the peer_group on the device") else: - st.log('Invalid BGP config parameter') + st.log('{} Invalid BGP config parameter {} '.format(cli_type,type1)) if config_cmd == 'no' and 'neighbor' in config_type_list and neighbor and not peergroup: - #if isinstance(neigh_name, dict): - if neighbor and config_cmd == "no": - url = st.get_datastore(dut, "rest_urls")['bgp_del_neighbor_config'] - if delete_urls: - for url in delete_urls: - delete_rest(dut, rest_url=url.format("default", neighbor)) - if not delete_rest(dut, rest_url=url.format("default", neighbor)): - st.error("neighbor is failed") - else: - neigh_data = dict() + + if config_cmd != 'no': + neigh_data.update({"neighbor-address": neighbor}) - neigh_data["config"] = dict() + neigh_data["config"].update({"neighbor-address": neighbor}) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) - if neighbor: - neigh_data1 = dict() - neigh_data1["afi-safis"] = dict() - neigh_data1["afi-safis"]["afi-safi"] = list() - neigh_data1_sub = dict() - neigh_data1_sub.update({"afi-safi-name": afi_safi_name}) - neigh_data1_sub["config"] = dict() - neigh_data1_sub["config"].update({"afi-safi-name": "afi-safi-name", "enabled": True}) - neigh_data1.update(neigh_data1_sub) - bgp_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data1) + + else: + url=st.get_datastore(dut,"rest_urls")['bgp_del_remote_as'] + url=url.format(vrf_name,neighbor) + if not delete_rest(dut,rest_url=url): + st.error("failed to delete remote-as") + + #config_remote_as = False if vrf_name != 'default' and removeBGP == 'yes': - bgp_data["openconfig-network-instance:bgp"]["global"]["config"].update({"as": 0}) + 
global_data["openconfig-network-instance:bgp"]["global"]["config"].update({"as": 0}) + + st.log(common_data) + st.log(global_data) + + if neighbor: + neigh_data.update(common_data) + global_data["openconfig-network-instance:bgp"]["neighbors"]["neighbor"].append(neigh_data) + elif peergroup: + peer_data.update(common_data) + global_data["openconfig-network-instance:bgp"]["peer-groups"]["peer-group"].append(peer_data) + + st.log(vrf_name) + url = st.get_datastore(dut,"rest_urls")["bgp_config"].format(vrf_name) + if not config_rest(dut,rest_url=url,http_method=cli_type,json_data=global_data): + st.error("failed to conifg bgp data") + url = st.get_datastore(dut, "rest_urls")['bgp_config_route_map'].format(vrf_name) + try: + if not open_data["openconfig-network-instance:table-connections"]["table-connection"]: + st.log("open_data is empty") + else: + if not config_rest(dut, rest_url=url, http_method=cli_type, json_data=open_data): + st.error("unable to configure route-map") + except Exception as e: + st.log("key not not found") + st.log(e) + return True else: st.log("Unsupported CLI TYPE - {}".format(cli_type)) return False @@ -3212,6 +5074,70 @@ def show_ip_bgp_route(dut, family='ipv4', **kwargs): command = "show bgp {}".format(family) elif cli_type == 'klish': command = "show bgp {} unicast".format(family) + elif cli_type in ["rest-patch", "rest-put"]: + rest_url = st.get_datastore(dut, 'rest_urls') + vrf = "default" + url = rest_url["bgp_routerid_state"].format(vrf) + output_router_id = get_rest(dut, rest_url=url) + if output_router_id and rest_status(output_router_id["status"]): + output_router_id = output_router_id["output"] + else: + output_router_id ={} + result = list() + router_id = "" + if output_router_id: + router_id = output_router_id["openconfig-network-instance:router-id"] + if not router_id: + return result + + if family != "ipv4": + url = rest_url['bgp_unicast_config'].format(vrf, "IPV6_UNICAST", "ipv6-unicast") + key_val = 
"openconfig-network-instance:ipv6-unicast" + else: + url = rest_url['bgp_unicast_config'].format(vrf, "IPV4_UNICAST", "ipv4-unicast") + key_val = "openconfig-network-instance:ipv4-unicast" + output = get_rest(dut, rest_url=url) + if output and rest_status(output["status"]): + output = output["output"] + else: + return result + st.debug(output) + if not output: + st.log("Empty output from GET Call") + return result + # routes = output[key_val]["loc-rib"]["routes"]["route"] + if key_val in output: + if "loc-rib" in output[key_val]: + routes = output[key_val]["loc-rib"]["routes"]["route"] + else: + return result + else: + return result + as_path = {"IGP":"i", "EGP":"e" ,"?":"incomplete","INCOMPLETE" :"incomplete"} + for route in routes: + show_output = dict() + show_output["router_id"] = router_id + show_output["network"] = route["prefix"] + show_output["weight"] = route["openconfig-bgp-ext:attr-sets"]["weight"] if "weight" in route["openconfig-bgp-ext:attr-sets"] else 0 + show_output["status_code"] = "*>" if route["state"]["valid-route"] is True else "" + show_output["metric"] = route["openconfig-bgp-ext:attr-sets"]["med"] if "med" in route["openconfig-bgp-ext:attr-sets"] else 0 + show_output["as_path"] = as_path[route["openconfig-bgp-ext:attr-sets"]["origin"]] + members = "" + if "as-path" in route["openconfig-bgp-ext:attr-sets"]: + route_as_path = route["openconfig-bgp-ext:attr-sets"]["as-path"] + if "as-segment" in route_as_path: + route_as_segment = route_as_path["as-segment"][0] + if "state" in route_as_segment: + members = ' '.join([str(item) for item in route_as_segment["state"]["member"]]) + show_output["as_path"] = "{} {}".format(members, as_path[route["openconfig-bgp-ext:attr-sets"]["origin"]]) + show_output["next_hop"] = route["openconfig-bgp-ext:attr-sets"]["next-hop"] + show_output["version"] = "" + show_output["vrf_id"] = vrf + show_output["local_pref"] = route["openconfig-bgp-ext:attr-sets"]["local-pref"] if "local-pref" in 
route["openconfig-bgp-ext:attr-sets"] else 32768 + show_output["internal"] = "" + result.append(show_output) + st.debug(result) + return result return st.show(dut, command, type=cli_type) def fetch_ip_bgp_route(dut, family='ipv4', match=None, select=None, **kwargs): @@ -3229,7 +5155,9 @@ def get_ip_bgp_route(dut, family='ipv4', **kwargs): for each in kwargs.keys(): match = {each: kwargs[each]} get_list = ["network", "as_path"] + st.log(match) entries = filter_and_select(output, get_list, match) + st.log(entries) if not entries: st.log("Could not get bgp route info") return False @@ -3406,6 +5334,7 @@ class ASPathAccessList: def __init__(self, name, cli_type=''): self.name = name self.cli_type = get_cfg_cli_type(None, cli_type=cli_type) + self.cli_type = 'klish' if self.cli_type in ['rest-patch','rest-put'] else self.cli_type self.match_sequence = [] if self.cli_type == 'vtysh': self.cmdkeyword = 'bgp as-path access-list' diff --git a/spytest/apis/routing/dhcp_relay.py b/spytest/apis/routing/dhcp_relay.py index caf03304c53..4f897c35b71 100644 --- a/spytest/apis/routing/dhcp_relay.py +++ b/spytest/apis/routing/dhcp_relay.py @@ -1,8 +1,8 @@ from spytest import st from apis.system.rest import config_rest, get_rest, delete_rest from apis.routing.ip import get_interface_ip_address -from utilities.utils import make_list, remove_last_line_from_string, get_interface_number_from_name -from utilities.common import filter_and_select +from utilities.utils import remove_last_line_from_string, get_interface_number_from_name +from utilities.common import make_list, filter_and_select, iterable @@ -481,7 +481,7 @@ def verify_dhcp_relay_detailed(dut, interface, **kwargs): st.log("{} and {} is not match ".format(each, kwargs[each])) return False if kwargs.get("server_addr"): - for result in output: + for result in iterable(output): if result.get("server_addr"): server_addr = result.get("server_addr") break diff --git a/spytest/apis/routing/evpn.py b/spytest/apis/routing/evpn.py index 
7fa941ca90b..a3c43997760 100644 --- a/spytest/apis/routing/evpn.py +++ b/spytest/apis/routing/evpn.py @@ -846,7 +846,7 @@ def verify_bgp_l2vpn_evpn_summary(dut,**kwargs): try: index_num = kwargs["neighbor"].index(evpn_neigh) exp_status=kwargs["updown"][index_num] - if neigh['state']['prefixes']['received'] > 0: + if neigh['state']['prefixes']['received'] >= 0: status="up" else: status="down" @@ -1674,8 +1674,6 @@ def verify_vxlan_tunnel_status(dut, src_vtep, rem_vtep_list, exp_status_list, cl rest_urls = st.get_datastore(dut, "rest_urls") url = rest_urls['vxlan_tunnel_info'] response = get_rest(dut, rest_url=url) - st.log('KLISH output for debugging REST') - st.show(dut, 'show vxlan tunnel', type='klish') if response['output']: cli_out = parse_rest_output_vxlan_tunnel(response) else: @@ -3170,13 +3168,14 @@ def verify_linktrack_summary(dut,**kwargs): return ret_val -def parse_rest_output_linktrack_group(dut,response,timeout,description=''): +def parse_rest_output_linktrack_group(dut,response,timeout,description='',lst_bringup_time='0'): lst_interfaces = response['output']['openconfig-lst-ext:interface'] result = [] for interface in lst_interfaces: lst_dict = {} lst_dict['description'] = description lst_dict['timeout'] = timeout + lst_dict['startup_remain_time'] = lst_bringup_time if 'upstream-groups' in interface.keys(): lst_dict['name'] = interface.get('upstream-groups',{}).get('upstream-group',[])[0].get('group-name',"") lst_dict['direction'] = "Upstream" @@ -3244,14 +3243,16 @@ def verify_linktrack_group_name(dut,**kwargs): url = rest_urls['get_link_track_timeout'].format(lst_name) lst_timeout = str(get_rest(dut, rest_url=url)['output']['openconfig-lst-ext:timeout']) + url = rest_urls['get_link_track_bringup_remain_time'].format(lst_name) + lst_bringup_time = str(get_rest(dut, rest_url=url)['output']['openconfig-lst-ext:bringup-remaining-time']) + url = rest_urls['get_link_track_interfaces'] response = get_rest(dut, rest_url=url) st.log('KLISH output for 
debugging REST') st.show(dut, 'show link state tracking {}'.format(kwargs['name']), type='klish') - if 'startup_remain_time' in kwargs: - del kwargs['startup_remain_time'] + if response['output']: - output = parse_rest_output_linktrack_group(dut,response,lst_timeout,lst_description) + output = parse_rest_output_linktrack_group(dut,response,lst_timeout,lst_description,lst_bringup_time) else: st.error("OCYANG-FAIL: verify link track group - Get Response is empty") return False @@ -3346,6 +3347,7 @@ def get_port_counters(dut, port, counter,**kwargs): if cli_type == "click": if '/' in prt: prt = st.get_other_names(dut,[prt])[0] + st.show(dut, "show interface counters -i {}".format(prt),type=cli_type) output = st.show(dut, "show interface counters -i {}".format(prt),type=cli_type) entries = filter_and_select(output, (cntr,), {'iface': prt}) list1.append(entries[0]) @@ -3431,15 +3433,16 @@ def neigh_suppress_config(dut, vlan, config='yes', skip_error=False, cli_type='' rest_urls = st.get_datastore(dut, "rest_urls") vlan_data = str(vlan) if type(vlan) is not str else vlan vlan_str = 'Vlan' + vlan_data if 'Vlan' not in vlan_data else vlan_data - url = rest_urls['vxlan_arp_nd_suppress'].format(vlan_str) payload = {"openconfig-vxlan:config":{ "arp-and-nd-suppress":"ENABLE"} } if config == 'yes': + url = rest_urls['vxlan_arp_nd_suppress'].format(vlan_str) if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): st.banner('FAIL-OCYANG: ARP and ND suppress config on vlan Failed') return False else: + url = rest_urls['vxlan_arp_nd_suppress_delete'].format(vlan_str) if not delete_rest(dut, rest_url=url): st.banner('FAIL-OCYANG: ARP and ND suppress UnConfig on vlan Failed') return False diff --git a/spytest/apis/routing/ip.py b/spytest/apis/routing/ip.py index 0b08ef28348..066cf0ca7f8 100644 --- a/spytest/apis/routing/ip.py +++ b/spytest/apis/routing/ip.py @@ -176,11 +176,14 @@ def config_ip_addr_interface(dut, interface_name='', ip_address='', subnet='', f 
st.error("Invalid IPv6 address.") return False command = "config interface ip add {} {}/{}".format(interface_name, ip_address, subnet) - st.config(dut, command, skip_error_check=skip_error) + output = st.config(dut, command, skip_error_check=skip_error) except Exception as e: st.log(e) return False - return True + if "Error: " in output: + return False + else: + return True elif config == 'remove': return delete_ip_interface(dut, interface_name, ip_address, subnet, family, cli_type=cli_type) else: @@ -245,8 +248,16 @@ def config_ip_addr_interface(dut, interface_name='', ip_address='', subnet='', f if not config_loopback_interfaces(dut, loopback_name=interface_name.capitalize(), cli_type=cli_type): st.error("msg", "Failed to create loopback interface") return False - url = rest_urls['sub_interface_config'].format(interface_name) - ip_config = {"openconfig-interfaces:subinterfaces": {"subinterface": [{"index": int(index), "config": {"index": int(index)}, "openconfig-if-ip:{}".format(family): {"addresses": {"address": [{"ip": ip_address, "config": {"ip": ip_address, "prefix-length": int(subnet)}}]}}}]}} + if is_secondary_ip == 'yes': + url = rest_urls['sub_interface_config'].format(interface_name) + ip_config = {"openconfig-interfaces:subinterfaces": {"subinterface": [{"index": int(index), "config": {"index": int(index)}, + "openconfig-if-ip:{}".format(family): {"addresses": {"address": [{"ip": ip_address, "config": {"ip": ip_address, + "prefix-length": int(subnet),"openconfig-interfaces-ext:secondary":True}}]}}}]}} + else: + url = rest_urls['sub_interface_config'].format(interface_name) + ip_config = {"openconfig-interfaces:subinterfaces": {"subinterface": [{"index": int(index), "config": {"index": int(index)}, + "openconfig-if-ip:{}".format(family): {"addresses": {"address": [{"ip": ip_address, "config": {"ip": ip_address, + "prefix-length": int(subnet)}}]}}}]}} else: if is_secondary_ip == 'yes': url = rest_urls['sub_interface_config'].format(interface_name) @@ 
-2494,12 +2505,18 @@ def verify_ip_sla_history(dut,inst,**kwargs): success = True cli_type = kwargs.pop('cli_type',st.get_ui_type(dut, **kwargs)) - if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' if cli_type == "click": cli_type="vtysh" - cli_out=st.show(dut,"show ip sla {} history".format(inst),type=cli_type) + if cli_type not in ['rest-patch', 'rest-put']: + cli_out=st.show(dut,"show ip sla {} history".format(inst),type=cli_type) + else: + rest_urls = st.get_datastore(dut, 'rest_urls') + rest_url = rest_urls['show_ip_sla_history'] + ocdata = {"openconfig-ip-sla:input":{"ip-sla-id":inst}} + output = config_rest(dut,rest_url=rest_url,http_method='post',json_data=ocdata,get_response=True)['output'] + cli_out = convert_sla_rest_output(output,parse_type='history') if "return_output" in kwargs: return cli_out @@ -2624,8 +2641,12 @@ def convert_sla_rest_output(output,parse_type='sla_summary'): transformed_output['last_chg'] = (item.get('state',{}).get('timestamp','').strip(r'\s*ago')).rstrip() transformed_output_list.append(transformed_output) else: - #history code will be added once klish defect gets fixed - pass + new_output = output.get('sonic-ip-sla:output',{}).get('IPSLA_HISTORY',[]) + for item in new_output: + transformed_output={} + transformed_output['event'] = item.get('event','') + transformed_output['event_time']= item.get('timestamp','') + transformed_output_list.append(transformed_output) return transformed_output_list @@ -2737,6 +2758,9 @@ def config_system_max_routes(dut,**kwargs): cmd ='' if cli_type == 'click': cmd = 'sudo config switch-resource route-scale routes {} -y'.format(route_cnt) + if not st.is_feature_supported("config_max_route_scale", dut): + st.community_unsupported(cmd, dut) + return False st.config(dut,cmd,type=cli_type) def config_ip_loadshare_hash(dut, **kwargs): @@ -2816,7 +2840,7 @@ def show_ip_loadshare(dut, **kwargs): rest_urls = st.get_datastore(dut, 'rest_urls') url = rest_urls['ecmp_show_ip_loadshare_hash'] out = 
get_rest(dut, rest_url=url) - var=out['output']['openconfig-loadshare-mode-ext:seed-attrs']['state'] + var=out['output']['openconfig-loadshare-mode-ext:state'] ip_var='' ipv6_var='' seed_var=str(var['ecmp-hash-seed']) @@ -2865,3 +2889,55 @@ def verify_ip_loadshare(dut, **kwargs): st.error("{} not found in the output.".format(key)) return_key = False return return_key + + +def create_route_leak(dut, vrf, network, **kwargs): + """ + To configure route leak + Author: Jagadish Chatrasi (jagadish.chatrasi@broadcom.com) + """ + #cli_type = st.get_ui_type(dut, **kwargs) + #cli_type = 'vtysh' if cli_type == 'click' else cli_type + cli_type = 'vtysh' ##CLI_TYPE hard-coded because the route leak configuration support is not available in other UIs + family = kwargs.get('family', 'ipv4') + if not (network and '/' in network): + st.error("Provide network with proper format") + return False + if family.lower() == 'ipv4': + network = ipaddress.IPv4Network(u'{}'.format(network), strict=False) + network = network.compressed + elif family.lower() == 'ipv6': + network = ipaddress.IPv6Network(u'{}'.format(network), strict=False) + network = network.compressed + else: + st.error("IP family should be ipv4/ipv6, but {} found".format(family)) + return False + if cli_type == 'vtysh': + command = list() + command.append("vrf {}".format(vrf)) + cmd = 'ip route {}'.format(network) if family.lower() == 'ipv4' else 'ipv6 route {}'.format(network) + if kwargs.get('next_hop'): + cmd+=' {}'.format(kwargs['next_hop']) + if kwargs.get('interface'): + cmd+=' {}'.format(kwargs['interface']) + if kwargs.get('nexthop_vrf'): + cmd+=' nexthop-vrf {}'.format(kwargs['nexthop_vrf']) + if kwargs.get('tag'): + cmd+=' tag {}'.format(kwargs['tag']) + if kwargs.get('track'): + cmd+=' track {}'.format(kwargs['track']) + if kwargs.get('table'): + cmd+=' table {}'.format(kwargs['table']) + if kwargs.get('label'): + cmd+=' label {}'.format(kwargs['label']) + if kwargs.get('onlink'): + cmd+=' onlink' + if 
kwargs.get('distance'): + cmd+=' {}'.format(kwargs['distance']) + command.append(cmd) + command.append('exit-vrf') + st.config(dut, command, type='vtysh') + else: + st.error("Unsupported CLI_TYPE: {}".format(cli_type)) + return False + return True diff --git a/spytest/apis/routing/ip_bgp.py b/spytest/apis/routing/ip_bgp.py index 534b93d77a5..3c3973d431f 100644 --- a/spytest/apis/routing/ip_bgp.py +++ b/spytest/apis/routing/ip_bgp.py @@ -1,5 +1,6 @@ from spytest.utils import filter_and_select from spytest import st +from apis.routing.bgp import show_bgp_ipv4_summary_vtysh, show_bgp_ipv6_summary_vtysh, show_bgp_ipv4_neighbor_vtysh, show_bgp_ipv6_neighbor_vtysh def verify_bgp_neighbor(dut,**kwargs): @@ -26,7 +27,6 @@ def verify_bgp_neighbor(dut,**kwargs): """ cli_type = kwargs.pop('cli_type',st.get_ui_type(dut, **kwargs)) cli_type = 'vtysh' if cli_type in ['vtysh', 'click'] else cli_type - if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' result = False @@ -61,6 +61,18 @@ def verify_bgp_neighbor(dut,**kwargs): cmd = "show bgp {} unicast vrf {} neighbors {}".format(family,vrf,kwargs['neighborip'][0]) else: cmd = "show bgp {} unicast vrf {} neighbors".format(family,vrf) + elif cli_type in ["rest-patch", "rest-put"]: + if family == "ipv4": + if len(kwargs['neighborip']) == 1: + output = show_bgp_ipv4_neighbor_vtysh(dut, neighbor_ip=kwargs['neighborip'][0], vrf=vrf, cli_type=cli_type) + else: + output = show_bgp_ipv4_neighbor_vtysh(dut, vrf=vrf, cli_type=cli_type) + else: + if len(kwargs['neighborip']) == 1: + output = show_bgp_ipv6_neighbor_vtysh(dut, neighbor_ip=kwargs['neighborip'][0], vrf=vrf, cli_type=cli_type) + else: + output = show_bgp_ipv6_neighbor_vtysh(dut, vrf=vrf, cli_type=cli_type) + else: if cli_type == 'vtysh': if len(kwargs['neighborip']) == 1: @@ -72,7 +84,19 @@ def verify_bgp_neighbor(dut,**kwargs): cmd = "show bgp {} unicast neighbors {}".format(family,kwargs['neighborip'][0]) else: cmd = "show bgp {} unicast neighbors".format(family) - 
output = st.show(dut, cmd, type=cli_type) + elif cli_type in ["rest-patch", "rest-put"]: + if family == "ipv4": + if len(kwargs['neighborip']) == 1: + output = show_bgp_ipv4_neighbor_vtysh(dut, neighbor_ip=kwargs['neighborip'][0], vrf="default", cli_type=cli_type) + else: + output = show_bgp_ipv4_neighbor_vtysh(dut, vrf=vrf, cli_type=cli_type) + else: + if len(kwargs['neighborip']) == 1: + output = show_bgp_ipv6_neighbor_vtysh(dut, neighbor_ip=kwargs['neighborip'][0], vrf="default", cli_type=cli_type) + else: + output = show_bgp_ipv6_neighbor_vtysh(dut, vrf=vrf, cli_type=cli_type) + if cli_type not in ["rest-patch", "rest-put"]: + output = st.show(dut, cmd, type=cli_type) #Get the index of peer from list of parsed output for i in range(len(kwargs['neighborip'])): @@ -117,7 +141,7 @@ def check_bgp_session(dut,nbr_list=[],state_list=[],vrf_name='default',**kwargs) """ ret_val = True cli_type = kwargs.pop('cli_type', st.get_ui_type(dut,**kwargs)) - if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' + # if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' output=[] if cli_type == 'click': if vrf_name == 'default': @@ -130,6 +154,12 @@ def check_bgp_session(dut,nbr_list=[],state_list=[],vrf_name='default',**kwargs) output = st.show(dut,'show bgp {} unicast summary'.format(family), type='klish') else: output = st.show(dut, 'show bgp {} unicast vrf {} summary'.format(family,vrf_name), type='klish') + elif cli_type in ["rest-put", "rest-patch"]: + family = kwargs.pop('family', 'ipv4') + if family == "ipv4": + output = show_bgp_ipv4_summary_vtysh(dut, vrf_name, cli_type=cli_type) + else: + output = show_bgp_ipv6_summary_vtysh(dut, vrf_name, cli_type=cli_type) if len(output)!=0: for nbr,state in zip(nbr_list,state_list): @@ -140,13 +170,13 @@ def check_bgp_session(dut,nbr_list=[],state_list=[],vrf_name='default',**kwargs) ret_val = False for entries in entry: if state=='Established': - if entries['state'].isdigit(): + if entries['state'].isdigit() or 
entries['state'] == "ESTABLISHED": st.log("BGP Neighbor {} in Established state".format(nbr)) else: st.error("BGP Neighbor {} state check Failed. Expected:{} Actual :{} ".format(nbr,state,entries['state'])) ret_val=False else: - if state == entries['state']: + if str(state).upper() == str(entries['state']).upper(): st.log("BGP Neighbor {} check passed. Expected : {} Actual {}".format(nbr,state,entries['state'])) else: st.error("BGP Neighbor {} state check Failed. Expected:{} Actual :{} ".format(nbr, state, entries['state'])) diff --git a/spytest/apis/routing/nat.py b/spytest/apis/routing/nat.py index bd0019a7076..d79c06a5a71 100644 --- a/spytest/apis/routing/nat.py +++ b/spytest/apis/routing/nat.py @@ -638,6 +638,25 @@ def poll_for_nat_statistics(dut, itr=15, delay=2, **kwargs): st.wait(delay) i += 1 +def poll_for_twice_nat_statistics(dut, itr=15, delay=2, **kwargs): + """ + :param :dut: + :param :acl_table: + :param :acl_rule: + :param :itr: + :param :delay: + :return: + """ + i = 1 + while True: + result = verify_twice_nat_statistics(dut, **kwargs) + if result is None: + if i >= itr: + return None + st.wait(delay) + i += 1 + else: + return result def verify_nat_statistics(dut, **kwargs): """ @@ -679,6 +698,37 @@ def verify_nat_statistics(dut, **kwargs): return False +def verify_twice_nat_statistics(dut, **kwargs): + """ + Verify Twice NAT Statistics + + :param dut: + :return: + """ + stats = ['packets', 'bytes'] + cli_type = st.get_ui_type(dut, **kwargs) + output = show_nat_statistics(dut, cli_type=cli_type) + match = {} + + if 'protocol' in kwargs: + match['protocol'] = kwargs['protocol'] + if 'src_ip' in kwargs: + match['src_ip'] = kwargs['src_ip'] + if 'src_ip_port' in kwargs: + match['src_ip_port'] = kwargs['src_ip_port'] + if 'dst_ip' in kwargs: + match['dst_ip'] = kwargs['dst_ip'] + if 'dst_ip_port' in kwargs: + match['dst_ip_port'] = kwargs['dst_ip_port'] + + st.debug(match) + entries = filter_and_select(output, stats, match) + + if entries and 
(int(entries[0]['packets']) > 0): + return entries + else: + return None + def get_nat_translations(dut, **kwargs): """ Get NAT Translations diff --git a/spytest/apis/routing/ospf.py b/spytest/apis/routing/ospf.py index 7469852662d..44f36cb730c 100644 --- a/spytest/apis/routing/ospf.py +++ b/spytest/apis/routing/ospf.py @@ -1,6 +1,6 @@ # OSPFv2 APIs # Author: Naveena Suvarna (naveen.suvarna@broadcom.com) - +from __future__ import division from spytest import st from spytest.utils import filter_and_select from utilities.utils import get_interface_number_from_name @@ -2797,7 +2797,7 @@ def rest_command_output_parsing(dut, vrf, type): temp['vrfname'] = vrf temp['name'] = state_info['id'] temp['ipv4'] = state_info['openconfig-ospfv2-ext:address'] + '/' + str(state_info['openconfig-ospfv2-ext:address-len']) - temp['hellotmr'] = str(int(temp['hellotmr'])/1000) + temp['hellotmr'] = str(int(int(temp['hellotmr'])/1000)) temp['hellodue'] = str(float(temp['hellodue'])/1000) temp['nwtype'] = temp['nwtype'].upper() temp['txdelay'] = str(temp['txdelay']) diff --git a/spytest/apis/routing/pim.py b/spytest/apis/routing/pim.py index 7a79cf28f77..476b0528d32 100644 --- a/spytest/apis/routing/pim.py +++ b/spytest/apis/routing/pim.py @@ -1,3 +1,4 @@ +from __future__ import division from spytest.utils import filter_and_select from spytest import st import re @@ -528,8 +529,9 @@ def verify_pim_show(dut,**kwargs): pattern = re.compile(r'\w+') result = pattern.findall(entry['oif']) res = result[::2] + result[1::2] - oif_list = [str(oif) for oif in res[:len(res) / 2]] - flag_list = [str(flag) for flag in res[len(res) / 2:]] + res_by_2 = int(len(res)/2) + oif_list = [str(oif) for oif in res[:res_by_2]] + flag_list = [str(flag) for flag in res[res_by_2:]] output[entry_index]['oif'] = oif_list output[entry_index]['flag'] = flag_list #Converting all kwargs to list type to handle single or list of mroute instances diff --git a/spytest/apis/security/rbac.py b/spytest/apis/security/rbac.py index 
f07bd137768..d5ce99cc17a 100644 --- a/spytest/apis/security/rbac.py +++ b/spytest/apis/security/rbac.py @@ -121,7 +121,7 @@ def gnmi_call(dut, **kwargs): st.report_fail('rbac_call_fail', "gNMI", mode, login_type) msg = 'Failed to execute set command using gNMI session with mode- {mode}, type- {login_type}' - if mode == 'rw' and "op: UPDATE" not in gnmi_set_out and "description" not in str(gnmi_get_out): + if mode == 'rw' and "op: UPDATE" not in str(gnmi_set_out) and "description" not in str(gnmi_get_out): st.error(msg.format(**kwargs)) result2 = False if mode == 'ro' and not gnmi_set_out and "description" not in str(gnmi_get_out): @@ -160,7 +160,7 @@ def rest_rbac_call(dut, **kwargs): url = kwargs.get('url') if model == 'oc-yang': operation1 = {"openconfig-interfaces:mtu": 9216} - operation2 = {"openconfig-interfaces:mtu": 9100} + operation2 = {"openconfig-interfaces:mtu": 9100} else: operation1 = {"sonic-port:admin_status": "down"} operation2 = {"sonic-port:admin_status": "up"} @@ -223,4 +223,4 @@ def add_user(dut, username): :return: """ st.config(dut, 'usermod -s /bin/bash {}'.format(username)) - return True \ No newline at end of file + return True diff --git a/spytest/apis/switching/mac.py b/spytest/apis/switching/mac.py index 7454efed25b..2e56ad399c4 100644 --- a/spytest/apis/switching/mac.py +++ b/spytest/apis/switching/mac.py @@ -2,12 +2,12 @@ import json from spytest import st -from spytest.utils import filter_and_select import apis.common.wait as waitapi from apis.system.rest import get_rest,delete_rest,config_rest from utilities.utils import get_interface_number_from_name +from utilities.common import filter_and_select, iterable def get_mac(dut,**kwargs): @@ -109,9 +109,8 @@ def get_mac_entries_by_mac_address(dut, mac_address, **kwargs): mac_entries = st.show(dut, command,type=cli_type) elif cli_type in ["rest-patch", "rest-put"]: entries = get_mac(dut, cli_type=cli_type) - st.debug(entries) mac_entries = [] - for entry in entries: + for entry in 
iterable(entries): exp = "^{}".format(mac_address.strip()) if re.search(exp, entry['macaddress'], re.IGNORECASE): mac_entries.append(entry) @@ -134,6 +133,9 @@ def get_mac_count(dut, cli_type=""): if cli_type == 'click': field = "mac_count" command = "show mac count" + if not st.is_feature_supported("show-mac-count-command", dut): + st.community_unsupported(command, dut) + return 0 output = st.show(dut, command, type=cli_type) elif cli_type == 'klish': field = "count" @@ -346,7 +348,6 @@ def delete_mac(dut, mac, vlan, cli_type=""): else: st.log("Unsupported cli") return False - return True def config_mac_agetime(dut, agetime, cli_type="", config= "add", **kwargs): @@ -391,11 +392,12 @@ def get_mac_agetime(dut, cli_type=""): command = '' cli_type = st.get_ui_type(dut, cli_type=cli_type) if cli_type == 'click': - command = "show mac aging_time" if st.is_feature_supported("show-mac-aging-time-command", dut): command = "show mac aging-time" - if st.is_feature_supported("show-mac-aging_time-command", dut): - st.community_unsupported(command, dut) + elif st.is_feature_supported("show-mac-aging_time-command", dut): + command = "show mac aging_time" + else: + st.community_unsupported("show mac aging-time", dut) return 300 elif cli_type == 'klish': command = "show mac address-table aging-time" @@ -566,7 +568,7 @@ def configure_macmove_threshold(dut, **kwargs): """ cli_type = kwargs.get("cli_type", st.get_ui_type(dut)) - if cli_type in ["rest-put", "rest-patch"]: cli_type = 'click' + if cli_type in ["rest-put", "rest-patch"]: cli_type = 'klish' command ='' if cli_type == "click": if 'count' in kwargs: @@ -575,9 +577,9 @@ def configure_macmove_threshold(dut, **kwargs): command += " config mac dampening_threshold_interval {}\n".format(kwargs['interval']) elif cli_type == "klish": if 'count' in kwargs: - command = " mac address-table dampening-threshold {}\n".format(kwargs['count']) + command = "mac address-table dampening-threshold {}\n".format(kwargs['count']) if 'interval' 
in kwargs: - st.log('cmd is not available in klish') + command += "mac address-table dampening-interval {}\n".format(kwargs['interval']) elif cli_type in ["rest-put", "rest-patch"]: st.log('Needs to add rest url support') #url = st.get_datastore(dut, "rest_urls")["config_interface"] @@ -600,12 +602,14 @@ def verify_mac_dampening_threshold(dut, **kwargs): usage verify_mac_dampening_threshold(dut1,count=2) """ - cli_type = kwargs.get("cli_type", st.get_ui_type(dut)) - if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' + #cli_type = kwargs.get("cli_type", st.get_ui_type(dut)) + #if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' + ## Changes in klish command as part of bug fix, fallback to click till fixing templates. + cli_type = 'click' if cli_type == 'click': cmd = "show mac dampening_threshold" else: - cmd = "show mac dampening-threshold" + cmd = "show mac dampening" parsed_output = st.show(dut,cmd,type=cli_type) if len(parsed_output) == 0: st.error("OUTPUT is Empty") @@ -626,17 +630,20 @@ def verify_mac_dampening_disabled_ports(dut, **kwargs): verify_mac_dampening_disabled_ports(data.dut1, port_list = ['Ethernet127','Ethernet126']) """ + parsed_output = [] cli_type = kwargs.get("cli_type", st.get_ui_type(dut)) if cli_type in ['rest-patch', 'rest-put']: cli_type = 'klish' if cli_type == 'click': cmd = "show mac dampening_disabled_ports" else: cmd = "show mac dampening-disabled-ports" + parsed_output = st.show(dut,cmd,type=cli_type) if len(parsed_output) == 0: - st.error("OUTPUT is Empty") - return False + ### Klish output empty when disabled ports are not there + parsed_output = [{'port_list':['None']}] + st.log("DEBUG==>{}".format(parsed_output)) if 'return_output' in kwargs: return parsed_output diff --git a/spytest/apis/switching/mclag.py b/spytest/apis/switching/mclag.py index b32a9a2c57c..36f1b8330f0 100644 --- a/spytest/apis/switching/mclag.py +++ b/spytest/apis/switching/mclag.py @@ -5,7 +5,7 @@ import spytest.utils as utils from 
apis.common import redis from utilities.utils import get_interface_number_from_name -from apis.system.rest import get_rest +from apis.system.rest import get_rest, config_rest, delete_rest def config_domain(dut,domain_id,**kwargs): ''' @@ -399,6 +399,19 @@ def config_mclag_system_mac(dut,skip_error=False, **kwargs): if "Could not connect to Management REST Server" in output: st.error("klish mode not working.") return False + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, 'rest_urls') + url = rest_urls['mclag_config_mclag_system_mac'].format(int(domain_id)) + if config == "add": + payload = {"openconfig-mclag:mclag-system-mac": mac_val} + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + if not delete_rest(dut, rest_url=url): + return False + else: + st.log("Invalid cli_type provided: {}".format(cli_type)) + return False return True def verify_domain(dut,**kwargs): diff --git a/spytest/apis/switching/portchannel.py b/spytest/apis/switching/portchannel.py index 8dfb83869d1..1f5a71747d1 100644 --- a/spytest/apis/switching/portchannel.py +++ b/spytest/apis/switching/portchannel.py @@ -30,7 +30,10 @@ def create_portchannel(dut, portchannel_list=[], fallback=False, min_link="", st for portchannel_name in utils.make_list(portchannel_list): if not fallback: if not min_link: - command = "config portchannel add {} --static=true".format(portchannel_name) + static_flag = "--static=true" if static else "" + command = "config portchannel add {} {}".format(portchannel_name, static_flag).strip() + if not st.is_feature_supported("config_static_portchannel", dut): + command = "config portchannel add {}".format(portchannel_name) else: command = "config portchannel add {} --min-links {}".format(portchannel_name, min_link) else: @@ -148,7 +151,8 @@ def delete_all_portchannels(dut, cli_type=""): cli_type = st.get_ui_type(dut, cli_type=cli_type) available_portchannels = list() st.log("Deleting all 
availabe port channels...", dut=dut) - for portchannel in get_portchannel_list(dut, cli_type=cli_type): + output = get_portchannel_list(dut, cli_type=cli_type) + for portchannel in utils.iterable(output): if cli_type == "click": available_portchannels.append(portchannel["teamdev"]) elif cli_type in ["klish","rest-patch","rest-put"]: @@ -409,7 +413,7 @@ def verify_portchannel_member(dut, portchannel, members, flag='add', cli_type="" return True elif flag == 'del': for member in utils.make_list(members): - if member in portchannel_members: + if member in utils.iterable(portchannel_members): return False return True @@ -1229,7 +1233,7 @@ def get_portchannel_names(dut, cli_type=''): cli_type = st.get_ui_type(dut, cli_type=cli_type) result = [] output = get_portchannel_list(dut, cli_type=cli_type) - for entry in output: + for entry in utils.iterable(output): if cli_type == "click": result.append(entry['teamdev']) elif cli_type in ["klish","rest-put","rest-patch"]: diff --git a/spytest/apis/switching/pvst.py b/spytest/apis/switching/pvst.py index 75ecaa10ebf..cfb08e273e8 100755 --- a/spytest/apis/switching/pvst.py +++ b/spytest/apis/switching/pvst.py @@ -380,7 +380,8 @@ def config_stp_interface_params(dut, iface, **kwargs): else: command.append("{} spanning-tree {}".format(no_form, click_2_klish[each_key])) command.append('exit') - if not st.config(dut, command, skip_error_check=True, type=cli_type): + out = st.config(dut, command, skip_error_check=True, type=cli_type) + if "%Error" in out: return False return True elif cli_type in ["rest-put", "rest-patch"]: @@ -1278,7 +1279,7 @@ def get_stp_next_root_port(dut, vlan, cli_type=""): # Preparing DATA to process and find the next Root/Forwarding port. 
cut_data = {} pc_list = [] - for lag_intf in portchannel.get_portchannel_list(partner): + for lag_intf in cutils.iterable(portchannel.get_portchannel_list(partner)): if cli_type == "click": pc_list.append(lag_intf["teamdev"]) elif cli_type in ["klish", "rest-put", "rest-patch"]: @@ -1405,6 +1406,7 @@ def check_for_single_root_bridge_per_vlan(dut_list, vlan_list, dut_vlan_data, cl st.log("Verifying the single root bridge per vlan ...") dut_li = list([str(e) for e in dut_list]) if isinstance(dut_list, list) else [dut_list] vlan_li = list([str(e) for e in vlan_list]) if isinstance(vlan_list, list) else [vlan_list] + st.log("DUT LIST : {}, VLAN LIST :{}".format(dut_list, vlan_list)) if len(vlan_list) != len(dut_list): st.log("Invalid data provided to check the root bridge per vlan ...") st.report_fail("invalid_data_for_root_bridge_per_vlan") diff --git a/spytest/apis/switching/pvst_elasticity_wrapper.py b/spytest/apis/switching/pvst_elasticity_wrapper.py index 50c8e890a18..0fedcee64a8 100755 --- a/spytest/apis/switching/pvst_elasticity_wrapper.py +++ b/spytest/apis/switching/pvst_elasticity_wrapper.py @@ -14,6 +14,7 @@ import utilities.utils as utils +tg_info = {} minimum_duts = 3 min_duts_tg_links = 3 min_links_between_duts = 3 @@ -747,8 +748,8 @@ def config_tg_streams(vars, device_data, vlan_list, mac_count=1): :param vlan_list: :return: """ + global src_tg1_vlan_inc_mac_fix_unknown, tg_info tg_info = {} - global src_tg1_vlan_inc_mac_fix_unknown tg_list = [device_data[each][0] for each in device_data.keys()] tg_handler = tgapi.get_handles(vars, tg_list) tg = tg_handler["tg"] diff --git a/spytest/apis/switching/vlan.py b/spytest/apis/switching/vlan.py index 94d9ce995d0..09c7d6c5eeb 100644 --- a/spytest/apis/switching/vlan.py +++ b/spytest/apis/switching/vlan.py @@ -2,7 +2,7 @@ # Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) import json from spytest import st -from utilities.common import random_vlan_list, filter_and_select, exec_foreach, make_list +from 
utilities.common import random_vlan_list, filter_and_select, exec_foreach, make_list, iterable from utilities.utils import get_interface_number_from_name, get_portchannel_name_for_rest from utilities.parallel import ensure_no_exception from apis.system.rest import config_rest, delete_rest, get_rest, rest_status @@ -434,7 +434,7 @@ def get_vlan_member(dut, vlan_list=[], cli_type = ''): temp += filter_and_select(out, None, {"vid": each}) out = temp - for each in out: + for each in iterable(out): if each['member']: if each['vid'] not in vlan_val: vlan_val[each['vid']] = [each['member']] @@ -463,7 +463,7 @@ def get_member_vlan(dut, interface_list=[], cli_type = ''): temp += filter_and_select(out, None, {"member": each}) out = temp - for each in out: + for each in iterable(out): if each['member']: if each['member'] not in member_val: member_val[each['member']] = [each['vid']] @@ -915,7 +915,7 @@ def show_vlan_from_rest_response(rest_response): vlan_participation_data = dict() if "Vlan" in intf_data.get("name"): vlan_data["vid"] = intf_data.get("name") - vlan_data["status"] = intf_data.get("state")["admin-status"] + vlan_data["status"] = intf_data.get("state")["admin-status"] if "admin-status" in intf_data.get("state") else "" if intf_data.get("openconfig-if-ethernet:ethernet"): if intf_data["openconfig-if-ethernet:ethernet"].get("openconfig-vlan:switched-vlan"): vlan_participation_data[intf_data["name"]] = dict() diff --git a/spytest/apis/system/basic.py b/spytest/apis/system/basic.py index 54babab73ab..f1810a9dfbf 100644 --- a/spytest/apis/system/basic.py +++ b/spytest/apis/system/basic.py @@ -9,7 +9,7 @@ from spytest.utils import exec_all import apis.system.connection as conn_obj -from utilities.common import delete_file, do_eval, make_list +from utilities.common import delete_file, do_eval, make_list, iterable import utilities.utils as utils_obj from apis.system.rest import get_rest,config_rest @@ -676,7 +676,7 @@ def copy_file_from_client_to_server(ssh_con_obj, 
**kwargs): st.report_fail("scp_file_transfer_failed", kwargs["src_path"], kwargs["dst_path"]) -def check_error_log(dut, file_path, error_string, lines=1, file_length=50, match=None): +def check_error_log(dut, file_path, error_string, lines=1, file_length=50, match=None, start_line=0): """ Author: Chaitanya Vella (chaitanya-vella.kumar@broadcom.com) Function to check the error log @@ -687,7 +687,10 @@ def check_error_log(dut, file_path, error_string, lines=1, file_length=50, match :param file_length: :return: """ - command = 'sudo tail -{} {} | grep "{}" | tail -{}'.format(file_length, file_path, error_string, lines) + if start_line != 0: + command = 'sudo tail -n +{} {} | grep "{}" | grep -Ev "sudo tail"'.format(start_line, file_path, error_string) + else: + command = 'sudo tail -{} {} | grep "{}" | tail -{}'.format(file_length, file_path, error_string, lines) try: response = utils_obj.remove_last_line_from_string(st.show(dut, command, skip_tmpl=True, skip_error_check=True)) result = response.find(error_string) > 1 if not match else response @@ -953,7 +956,7 @@ def get_platform_syseeprom_as_dict(dut, tlv_name=None, key='value', decode=False """ rv = {} output = get_platform_syseeprom(dut, tlv_name=tlv_name, key=key, decode=decode) - for each in output: + for each in iterable(output): rv[each['tlv_name']] = each[key] return rv @@ -1422,7 +1425,7 @@ def get_number_of_lines_in_file(connection_obj, file_path, device="server"): match = re.match(r'^\d+', result) if match: st.log("####### LINE NUMBER- {}".format(match.group(0))) - return match.group(0) + return int(match.group(0)) return line_number @@ -1624,21 +1627,19 @@ def delete_line_using_specific_string(connection_obj, specific_string, file_path return st.config(connection_obj, command) -def cmd_validator(dut, commands, cli_type='klish', error_list=[]): +def cmd_validator(dut, commands, cli_type='klish'): """ Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) :param dut: :param commands: :param cli_type: - :param 
error_list: :return: """ result = True - error_list.extend(['%Error']) + errs = ['%Error'] command_list = commands if isinstance(commands, list) else commands.split('\n') - st.log(command_list) out = st.config(dut, command_list, type=cli_type, skip_error_check=True) - for each in error_list: + for each in errs: if each in out: st.error("Error string '{}' found in command execution.".format(each)) result = False @@ -1886,8 +1887,8 @@ def get_mgmt_ip(dut, interface): return None def renew_mgmt_ip(dut, interface): - output_1 = st.config(dut, "/sbin/dhclient -v -r {}".format(interface), skip_error_check=True) - output_2 = st.config(dut, "/sbin/dhclient -v {}".format(interface), skip_error_check=True) + output_1 = st.config(dut, "/sbin/dhclient -v -r {}".format(interface), skip_error_check=True, expect_ipchange=True) + output_2 = st.config(dut, "/sbin/dhclient -v {}".format(interface), skip_error_check=True, expect_ipchange=True) return "\n".join([output_1, output_2]) def set_mgmt_vrf(dut, mgmt_vrf): @@ -2264,3 +2265,9 @@ def swss_config(dut, data): st.config(dut, "docker cp {0} swss:{1}".format(file_path1, file_path2)) st.config(dut, "docker exec -it swss bash -c 'swssconfig {0}'".format(file_path2)) + +def flush_iptable(dut): + st.config(dut, "iptables -F") + +def execute_linux_cmd(dut, cmd): + st.show(dut, cmd, skip_tmpl=True) diff --git a/spytest/apis/system/boot_up.py b/spytest/apis/system/boot_up.py index 143bc2e7bec..77c6fe23039 100644 --- a/spytest/apis/system/boot_up.py +++ b/spytest/apis/system/boot_up.py @@ -130,3 +130,24 @@ def sonic_installer_list(dut, **kwargs): retval["Available"] = availableList return retval +def get_onie_grub_config(dut, mode): + errs = ["/dev/sda2 does not exist", + "/mnt/onie-boot/onie/tools/bin/onie-boot-mode: command not found", + "No such file or directory", + "/mnt/onie-boot/: not mounted"] + cmds = """ + sudo apt-get -f install -y grub-common + sudo mkdir -p /mnt/onie-boot/ + sudo mount /dev/sda2 /mnt/onie-boot/ + sudo 
/mnt/onie-boot/onie/tools/bin/onie-boot-mode -o {0} + sudo grub-editenv /mnt/onie-boot/grub/grubenv set diag_mode=none + sudo grub-editenv /mnt/onie-boot/grub/grubenv set onie_mode={0} + sudo grub-editenv /host/grub/grubenv set next_entry=ONIE + sudo grub-reboot --boot-directory=/host/ ONIE + sudo umount /mnt/onie-boot/ + sudo sync + sleep 5 + sudo sed -i 's|DEVPATH=./usr/share/sonic/device.|set -x ; \\n &|g' /usr/local/bin/reboot + """.format(mode).strip().splitlines() + return cmds, errs + diff --git a/spytest/apis/system/error_handling.py b/spytest/apis/system/error_handling.py index a288470c5b4..cbb44c1de3c 100644 --- a/spytest/apis/system/error_handling.py +++ b/spytest/apis/system/error_handling.py @@ -2,11 +2,14 @@ # Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) import re -from spytest import st -import apis.common.asic_bcm as asicapi -from apis.system.interface import verify_ifname_type -from spytest.utils import filter_and_select +from spytest import st, SpyTestDict +import apis.common.asic as asicapi from apis.common import redis +from apis.system.interface import verify_ifname_type + +from utilities.common import filter_and_select + +vars = SpyTestDict() def verify_error_db(dut, table, **kwargs): """ @@ -242,7 +245,7 @@ def verify_error_db_redis(dut, table, **kwargs): def verify_route_count_bcmshell(dut, route_count, af='ipv4', itter=30, delay=1, flag='ge', timeout=120): """ - Poll and verify the route count using bcmcmd + Poll and verify the route count using asic api Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) :param dut: :param route_count: @@ -256,9 +259,9 @@ def verify_route_count_bcmshell(dut, route_count, af='ipv4', itter=30, delay=1, i = 1 while True: if af == 'ipv4': - curr_count = asicapi.bcmcmd_route_count_hardware(dut, timeout=timeout) + curr_count = asicapi.get_ipv4_route_count(dut, timeout=timeout) if af == 'ipv6': - curr_count = asicapi.bcmcmd_ipv6_route_count_hardware(dut, timeout=timeout) + curr_count = 
asicapi.get_ipv6_route_count(dut, timeout=timeout) if flag == 'ge' and int(curr_count) >= int(route_count): st.log("Route count matched Provided {}, Detected {} - flag = {}".format(route_count, curr_count, flag)) @@ -409,14 +412,14 @@ def eh_bcm_debug_show(dut, af='both', table_type='all', ifname_type=None): st.banner("Error handling DEBUG Calls - START") if af == 'ipv4' or af == 'both': if table_type == 'route' or table_type == 'all': - asicapi.bcmcmd_l3_defip_show(dut) + asicapi.dump_l3_defip(dut) if table_type == 'nbr' or table_type == 'all': - asicapi.bcmcmd_l3_l3table_show(dut) + asicapi.dump_l3_l3table(dut) if af == 'ipv6' or af == 'both': if table_type == 'route' or table_type == 'all': - asicapi.bcmcmd_l3_ip6route_show(dut) + asicapi.dump_l3_ip6route(dut) if table_type == 'nbr' or table_type == 'all': - asicapi.bcmcmd_l3_ip6host_show(dut) + asicapi.dump_l3_ip6host(dut) if table_type == 'all': verify_show_error_db(dut, ifname_type=ifname_type) st.banner("Error handling DEBUG Calls - END") diff --git a/spytest/apis/system/interface.py b/spytest/apis/system/interface.py index 71e634060a3..49ed9d1bb4f 100644 --- a/spytest/apis/system/interface.py +++ b/spytest/apis/system/interface.py @@ -60,7 +60,7 @@ def interface_operation(dut, interfaces, operation="shutdown", skip_verify=True, interfaces_li = make_list(interfaces) if cli_type == "click": response = portapi.set_status(dut, interfaces_li, operation) - if "Error" in response: + if "Error" in str(response): st.log(response) return False @@ -220,7 +220,7 @@ def interface_properties_set(dut, interfaces_list, property, value, skip_error=F st.log("Provided fec value not supported ...") return False if value != "none": - command = " fec {}".format(value) + command = " fec {}".format(value.upper()) else: command = " fec off" else: @@ -537,7 +537,7 @@ def show_specific_interface_counters(dut, interface_name, cli_type=''): url = rest_urls['per_interface_details'].format(interface_name) output = [] result = 
get_rest(dut, rest_url = url, timeout=60) - processed_output = portapi.process_intf_counters_rest_output(result, single_port=True) + processed_output = portapi.process_intf_counters_rest_output(result) if processed_output: output.extend(processed_output) st.log(output) @@ -1304,22 +1304,15 @@ def verify_portgroup(dut, **kwargs): interface = kwargs.get("interface", None) portgroup = kwargs.get("portgroup", None) speed = kwargs.get("speed", None) - result = 0 output = show_portgroup(dut, interface=interface,cli_type=cli_type) if not output: st.log("Empty output observed - {}".format(output)) return False for data in output: if portgroup and str(data["portgroup"]) != str(portgroup): - result = 1 - else: - result = 0 + return False if speed and str(speed) not in data["speed"]: - result = 1 - else: - result = 0 - if result: - return False + return False return True def is_port_group_supported(dut, cli_type=""): diff --git a/spytest/apis/system/logging.py b/spytest/apis/system/logging.py index 73843a51345..1c2de324ee2 100644 --- a/spytest/apis/system/logging.py +++ b/spytest/apis/system/logging.py @@ -10,6 +10,7 @@ import apis.system.switch_configuration as sc_obj import utilities.utils as utils +from utilities.common import make_list log_files = [r'/var/log/syslog', r'/var/log/syslog.1'] @@ -133,21 +134,25 @@ def check_unwanted_logs_in_logging(dut, user_filter=None): """ Check unwanted log based on uers filter list Author: Prudvi Mangadu (prudvi.mangadu@broadcom.com) - :param dut: :param user_filter: :return: """ result = True - if user_filter is None: - user_filter = [] static_filter = ['i2c', 'fan', 'power'] - over_all_filter = static_filter + user_filter - for each_string in over_all_filter: - temp_count = get_logging_count(dut, filter_list=each_string) - st.log("{} - logs found on the error string '{}'".format(temp_count, each_string)) + over_all_filter = static_filter + make_list(user_filter) if user_filter else static_filter + for filter in over_all_filter: + 
temp_count = get_logging_count(dut, filter_list=filter) + st.debug("{} - logs found on the error string '{}'".format(temp_count, filter)) if temp_count: - result = False + if filter == 'fan': + filters = ["INFO system#monitor: MEM :: Name:fand"] + logs = show_logging(dut, filter_list=filter) + for log in logs: + if not any(fil.lower() in log.lower() for fil in filters): + result = False + else: + result = False return result diff --git a/spytest/apis/system/mirroring.py b/spytest/apis/system/mirroring.py index cf944485de2..1768c318368 100644 --- a/spytest/apis/system/mirroring.py +++ b/spytest/apis/system/mirroring.py @@ -41,11 +41,13 @@ def create_session(dut, **kwargs): st.log("Unsupported mirror type ..") return False if cli_type == "click": - command = "config mirror_session add " - if st.is_feature_supported("span-mirror-session", dut): - command += " {}".format(kwargs["mirror_type"]) - command += " {}".format(kwargs["session_name"]) - if "mirror_type" in kwargs and kwargs["mirror_type"] == "span": + if not st.is_feature_supported("span-mirror-session", dut): + command = "config mirror_session add {}".format(kwargs["session_name"]) + elif not st.is_feature_supported("config_mirror_session_add_type", dut): + command = "config mirror_session {} add {}".format(kwargs["mirror_type"], kwargs["session_name"]) + else: + command = "config mirror_session add {} {}".format(kwargs["mirror_type"], kwargs["session_name"]) + if kwargs["mirror_type"] == "span": if "destination_ifname" in kwargs: command += " {}".format(kwargs["destination_ifname"]) if "source_ifname" in kwargs: diff --git a/spytest/apis/system/ntp.py b/spytest/apis/system/ntp.py old mode 100755 new mode 100644 index 8f9385b98cd..17b7db801ef --- a/spytest/apis/system/ntp.py +++ b/spytest/apis/system/ntp.py @@ -1,14 +1,18 @@ - import json import re +import datetime +import copy from spytest import st -from spytest.utils import filter_and_select -from utilities.utils import ensure_service_params + +from 
utilities.common import filter_and_select, iterable, make_list +from utilities.utils import ensure_service_params, get_interface_number_from_name + from apis.system.rest import config_rest, get_rest, delete_rest +errors_list = ['error', 'invalid', 'usage', 'illegal', 'unrecognized'] + def add_ntp_servers(dut, iplist=[], cli_type=''): """ - :param dut: :param iplist: :return: @@ -71,7 +75,7 @@ def delete_ntp_servers(dut, cli_type=''): st.log("No servers to delete") return True else: - for ent in output: + for ent in iterable(output): server_ip = ent["remote"].strip("+*#o-x").strip() if cli_type == "click": commands.append("config ntp del {}".format(server_ip)) @@ -181,16 +185,16 @@ def verify_ntp_server_details(dut, server_ip=None, **kwargs): if not output: flag = 0 if server_ip is None: - if "No association ID's returned" in output: + if "No association ID's returned" in str(output): return True - elif "%Error: Resource not found" in output: + elif "%Error: Resource not found" in str(output): return True else: return False else: server_ips = [server_ip] if type(server_ip) is str else list([str(e) for e in server_ip]) data = kwargs - for ent in output: + for ent in iterable(output): remote_ip = ent["remote"].strip("+*#o-x").strip() if remote_ip in server_ips: if 'remote' in data and remote_ip not in data['remote']: @@ -507,14 +511,14 @@ def verify_ntp_service_status(dut, status, iteration=1, delay=1): def verify_ntp_server_exists(dut, server_ip=None, **kwargs): output = show_ntp_server(dut) if server_ip is None: - if "No association ID's returned" in output: + if "No association ID's returned" in str(output): return True else: return False else: server_ips = [server_ip] if type(server_ip) is str else list([str(e) for e in server_ip]) data = kwargs - for ent in output: + for ent in iterable(output): remote_ip = ent["remote"].strip("+*#o-x").strip() if remote_ip in server_ips: if 'remote' in data and remote_ip not in data['remote']: @@ -579,9 +583,9 @@ def 
get_rest_server_info(server_output): for server in servers: temp = dict() server_details = server['state'] - req_params = ['address', 'openconfig-system-ext:reach', 'openconfig-system-ext:now', 'stratum', 'openconfig-system-ext:peerdelay', 'openconfig-system-ext:peertype', 'openconfig-system-ext:peeroffset', 'openconfig-system-ext:peerjitter', 'poll-interval', 'openconfig-system-ext:refid'] + req_params = ['address', 'openconfig-system-ext:reach', 'openconfig-system-ext:now', 'stratum', 'openconfig-system-ext:peerdelay', 'openconfig-system-ext:peertype', 'openconfig-system-ext:peeroffset', 'openconfig-system-ext:peerjitter', 'poll-interval', 'openconfig-system-ext:refid', 'openconfig-system-ext:selmode'] if all(param in server_details for param in req_params): - temp['remote'] = str(server_details['address']) + temp['remote'] = str(server_details['openconfig-system-ext:selmode']+server_details['address']) temp['reach'] = str(server_details['openconfig-system-ext:reach']) temp['when'] = str(server_details['openconfig-system-ext:now']) temp['st'] = str(server_details['stratum']) @@ -616,5 +620,254 @@ def get_time_zone_info(data): ret_val.append(out) return ret_val else: - st.log("invalid data") - return False \ No newline at end of file + st.error("invalid data") + return False + + +def get_ntp_logs(dut, filter=None): + """ + To get the NTP related logs from /var/log/ntp.log + :param dut: + :param filter: + :return out_list + """ + command = "cat /var/log/ntp.log" + command = "{} | grep '{}'".format(command, filter) if filter else command + output = st.show(dut, command, skip_tmpl=True, skip_error_check=True, faster_cli=False, max_time=1200) + out_list = output.strip().split('\n')[:-1] + for _ in range(out_list.count("'")): + out_list.remove("'") + return out_list + + +def verify_time_synch(server_time, client_time): + diff=10 + month_dict = {"Jan": 1, "Feb": 2, "Mar": 3, "Apr": 4, "May": 5, "Jun": 6, "Jul": 7, "Aug": 8, "Sep": 9, "Oct": 10, "Nov": 11, "Dec": 12} + 
try: + time1 = datetime.datetime(int(server_time['year']), month_dict[server_time['month']], int(server_time['monthday']), int(server_time['hours']), int(server_time['minutes']), int(server_time['seconds'])) + time2 = datetime.datetime(int(client_time['year']), month_dict[client_time['month']], int(client_time['monthday']), int(client_time['hours']), int(client_time['minutes']), int(client_time['seconds'])) + difference = (time1 - time2).total_seconds() + except Exception as e: + st.error("'{}' exception occurred".format(e)) + return False + return True if int(difference) < diff else False + + +def verify_ntp_synch(dut, server): + entries = show_ntp_server(dut) + if filter_and_select(entries, None, {'remote': "*{}".format(server)}): + st.debug("NTP synchronized with server: {}".format(server)) + return True + st.error("NTP not synchronized with server: {}".format(server)) + return False + + +def config_ntp_parameters(dut, **kwargs): + """ + To Configure NTP parameters + Author: Jagadish Chatrasi (jagadish.chatrasi@broadcom.com) + """ + cli_type = st.get_ui_type(dut, **kwargs) + config = kwargs.get('config', True) + skip_error = kwargs.get('skip_error', False) + commands = list() + if cli_type == "klish": + if 'source_intf' in kwargs: + config_string = '' if config else 'no ' + for src_intf in make_list(kwargs['source_intf']): + intf_data = get_interface_number_from_name(src_intf) + commands.append('{}ntp source-interface {} {}'.format(config_string, intf_data['type'], intf_data['number'])) + if 'vrf' in kwargs: + if not config: + commands.append('no ntp vrf') + else: + commands.append('ntp vrf {}'.format(kwargs['vrf'])) + if 'authenticate' in kwargs: + config_string = '' if config else 'no ' + commands.append('{}ntp authenticate'.format(config_string)) + if kwargs.get('auth_key_id'): + if not config: + commands.append('no ntp authentication-key {}'.format(kwargs['auth_key_id'])) + else: + if kwargs.get('auth_type') and kwargs.get('auth_string'): +
commands.append('ntp authentication-key {} {} "{}"'.format(kwargs['auth_key_id'], kwargs['auth_type'], kwargs['auth_string'])) + if kwargs.get('trusted_key'): + config_string = '' if config else 'no ' + commands.append('{}ntp trusted-key {}'.format(config_string, kwargs['trusted_key'])) + if kwargs.get('servers'): + servers = make_list(kwargs.get('servers')) + for server in servers: + if not config: + commands.append('no ntp server {}'.format(server)) + else: + commands.append('ntp server {} key {}'.format(server, kwargs['server_key']) if kwargs.get('server_key') else 'ntp server {}'.format(server)) + elif cli_type in ["rest-patch", "rest-put"]: + rest_urls = st.get_datastore(dut, "rest_urls") + if 'source_intf' in kwargs: + for src_intf in make_list(kwargs['source_intf']): + src_intf = 'eth0' if src_intf == "Management0" else src_intf + if config: + url = rest_urls['ntp_config_source_interface'] + payload = json.loads("""{"openconfig-system-ext:ntp-source-interface": ["string"]}""") + payload["openconfig-system-ext:ntp-source-interface"] = [src_intf] + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + url = rest_urls['ntp_delete_source_interface'].format(src_intf) + if not delete_rest(dut, rest_url=url): + return False + if 'vrf' in kwargs: + if config: + url = rest_urls['ntp_config_vrf_delete'] + payload = json.loads("""{"openconfig-system-ext:vrf": "string"}""") + payload["openconfig-system-ext:vrf"] = kwargs['vrf'] + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + url = rest_urls['ntp_config_vrf_delete'] + if not delete_rest(dut, rest_url=url): + return False + if 'authenticate' in kwargs: + url = rest_urls['ntp_config'] + if config: + payload = json.loads("""{"openconfig-system:config": {"enable-ntp-auth": true}}""") + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + payload = 
json.loads("""{"openconfig-system:config": {"enable-ntp-auth": false}}""") + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + if kwargs.get('auth_key_id'): + keymap = {"md5" : "NTP_AUTH_MD5", 'sha1' : 'NTP_AUTH_SHA1', 'sha2-256' : 'NTP_AUTH_SHA2_256'} + if not config: + url = rest_urls['ntp_key_delete'].format(kwargs['auth_key_id']) + if not delete_rest(dut, rest_url=url): + return False + else: + if kwargs.get('auth_type') and kwargs.get('auth_string'): + url = rest_urls['ntp_key_config'] + payload = json.loads("""{"openconfig-system:ntp-keys": { + "ntp-key": [ + { + "key-id": 0, + "config": { + "key-id": 0, + "key-type": "string", + "openconfig-system-ext:encrypted": false, + "key-value": "string" + } + } + ] + } + }""") + payload["openconfig-system:ntp-keys"]["ntp-key"][0]["key-id"] = int(kwargs['auth_key_id']) + payload["openconfig-system:ntp-keys"]["ntp-key"][0]["config"]["key-id"] = int(kwargs['auth_key_id']) + payload["openconfig-system:ntp-keys"]["ntp-key"][0]["config"]["key-type"] = keymap[kwargs['auth_type']] + payload["openconfig-system:ntp-keys"]["ntp-key"][0]["config"]["key-value"] = kwargs['auth_string'] + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + if kwargs.get('trusted_key'): + if config: + url = rest_urls['ntp_config'] + payload = json.loads("""{"openconfig-system:config": {"openconfig-system-ext:trusted-key": [0]}}""") + payload["openconfig-system:config"]["openconfig-system-ext:trusted-key"] = [int(kwargs['trusted_key'])] + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + url = rest_urls["ntp_trusted_key_delete"].format(kwargs['trusted_key']) + if not delete_rest(dut, rest_url=url): + return False + if kwargs.get('servers'): + servers = make_list(kwargs.get('servers')) + for server in servers: + if not config: + url = rest_urls['delete_ntp_server'].format(server) + if not delete_rest(dut, 
rest_url=url): + return False + else: + url = rest_urls['config_ntp_server'] + if kwargs.get('server_key'): + payload = json.loads("""{"openconfig-system:servers": { + "server": [ + { + "address": "string", + "config": { + "address": "string", + "openconfig-system-ext:key-id": 0 + } + } + ] + } + }""") + payload["openconfig-system:servers"]["server"][0]["address"] = server + payload["openconfig-system:servers"]["server"][0]["config"]["address"] = server + payload["openconfig-system:servers"]["server"][0]["config"]["openconfig-system-ext:key-id"] = int(kwargs.get('server_key')) + else: + payload = json.loads("""{"openconfig-system:servers": { + "server": [ + { + "address": "string", + "config": { + "address": "string" + } + } + ] + } + }""") + payload["openconfig-system:servers"]["server"][0]["address"] = server + payload["openconfig-system:servers"]["server"][0]["config"]["address"] = server + if not config_rest(dut, http_method=cli_type, rest_url=url, json_data=payload): + return False + else: + st.error("Unsupported CLI_TYPE: {}".format(cli_type)) + return False + if commands: + response = st.config(dut, commands, type=cli_type, skip_error_check=skip_error) + if any(error in response.lower() for error in errors_list): + st.error("The response is: {}".format(response)) + return False + return True + +def show(dut, kwargs): + """ + To get the show output of NTP servers/global configuration + Author: Jagadish Chatrasi (jagadish.chatrasi@broadcom) + """ + cli_type = st.get_ui_type(dut, **kwargs) + cli_type = 'klish' if cli_type == 'click' else cli_type + if cli_type == 'klish': + if kwargs.get('server'): + command = 'show ntp server' + elif kwargs.get('global'): + command = 'show ntp global' + else: + st.error('show command is not called for server/global') + return False + return st.show(dut, command, type=cli_type) + elif cli_type in ['rest-patch', 'rest-put']: + output = [] + rest_urls = st.get_datastore(dut, "rest_urls") + if kwargs.get('server'): + url = 
rest_urls["show_ntp_server"] + payload = get_rest(dut, rest_url=url)["output"]["openconfig-system:server"] + for row in payload: + table_data = {'server': row["state"]["address"]} + output.append(copy.deepcopy(table_data)) + elif kwargs.get('global'): + url = rest_urls["show_ntp_global"] + table_data = {'source_intf' : "", 'vrf' : ""} + payload = get_rest(dut, rest_url=url)["output"]["openconfig-system:state"] + if "openconfig-system-ext:ntp-source-interface" in payload: + table_data['source_intf'] = payload["openconfig-system-ext:ntp-source-interface"] + if "openconfig-system-ext:vrf" in payload: + table_data['vrf'] = payload["openconfig-system-ext:vrf"] + output.append(copy.deepcopy(table_data)) + else: + st.error('show command is not called for server/global') + return False + st.log("OUTPUT : {}".format(output)) + return output + else: + st.error("Unsupported CLI_TYPE: {}".format(cli_type)) + return False + diff --git a/spytest/apis/system/pfc.py b/spytest/apis/system/pfc.py index 5bb47eb7430..e361e63a8e9 100644 --- a/spytest/apis/system/pfc.py +++ b/spytest/apis/system/pfc.py @@ -746,7 +746,7 @@ def config_pfc_buffer_prameters(dut, hwsku, ports_dict, **kwargs): update_retval([buffer_pg, buffer_queue]) st.debug(retval) - elif hwsku.lower() in constants['TD3_PLATFORMS']: + elif hwsku.lower() in constants['TD3_PLATFORMS']+constants['MV2_PLATFORMS']: if core_buffer_config: buffer_pool = {"BUFFER_POOL": {"egress_lossless_pool": {"mode": "static", "size": "33004032", "type": "egress"}, "egress_lossy_pool": {"mode": "dynamic", "size": "12766208", "type": "egress"}, diff --git a/spytest/apis/system/port.py b/spytest/apis/system/port.py index c23f1c29db6..4f16ac0e4c1 100644 --- a/spytest/apis/system/port.py +++ b/spytest/apis/system/port.py @@ -145,7 +145,7 @@ def get_status(dut, port=None, cli_type=''): else: url = rest_urls['all_interfaces_details'] output = get_rest(dut, rest_url = url, timeout=60) - processed_output = process_intf_status_rest_output(output, 
single_port=False) + processed_output = process_intf_status_rest_output(output) if processed_output: result.extend(processed_output) return result @@ -243,6 +243,8 @@ def verify_oper_state(dut, port, state, cli_type=''): def get_interface_counters_all(dut, port=None, cli_type=''): cli_type = st.get_ui_type(dut, cli_type=cli_type) if cli_type == 'click': + # To avoid traffic rate inaccuracy, run and ignore first show command in click & use second one + st.show(dut, "show interfaces counters -a", type=cli_type) if port: command = "show interfaces counters -a -i {}".format(port) if not st.is_feature_supported("show-interfaces-counters-interface-command", dut): diff --git a/spytest/apis/system/port_rest.py b/spytest/apis/system/port_rest.py index a90e26de89a..c72869fc4f3 100644 --- a/spytest/apis/system/port_rest.py +++ b/spytest/apis/system/port_rest.py @@ -4,81 +4,87 @@ invalid_ports = ['vlan', 'eth0'] -def process_intf_status_rest_output(data, single_port=True): +def process_intf_status_rest_output(data): """ Author: Jagadish Chatrasi (jagadish.chatrasi@broadcom.com) :param data: :type dict: - :param single_port: :type bool: :return: :rtype: """ - ret_val = [] - try: - if single_port: - actaul_data = data['output']['openconfig-interfaces:interface'] - else: - actaul_data = data['output']['openconfig-interfaces:interfaces']['interface'] - for intf_entry in actaul_data: - temp = {} - if any(port in intf_entry['name'].lower() for port in invalid_ports): + ret_val = list() + if isinstance(data, dict) and data.get('output') and data['output'].get('openconfig-interfaces:interface'): + actual_data = data['output']['openconfig-interfaces:interface'] + elif isinstance(data, dict) and data.get('output') and data['output'].get('openconfig-interfaces:interfaces') and data['output']['openconfig-interfaces:interfaces'].get('interface'): + actual_data = data['output']['openconfig-interfaces:interfaces']['interface'] + else: + st.error('interface status GET data is not as per 
format') + st.debug('Provided data: {}'.format(data)) + return ret_val + if actual_data and isinstance(actual_data, list): + for intf_entry in actual_data: + temp = dict() + if intf_entry.get('name') and any(port in intf_entry['name'].lower() for port in invalid_ports): continue - temp['interface'] = intf_entry['name'] - if 'description' in intf_entry['state']: - temp['description'] = intf_entry['state']['description'] - temp['oper'] = intf_entry['state']['oper-status'].lower() - temp['admin'] = intf_entry['state']['admin-status'].lower() - temp['mtu'] = str(intf_entry['state']['mtu']) - port_speed = intf_entry.get('openconfig-if-ethernet:ethernet',{}).get('state',{}).get('port-speed',None) + temp['interface'] = intf_entry.get('name', '') + temp['description'] = intf_entry['state']['description'] if intf_entry.get('state') and intf_entry['state'].get('description') else '-' + temp['oper'] = intf_entry['state']['oper-status'].lower() if intf_entry.get('state') and intf_entry['state'].get('oper-status') else '' + temp['admin'] = intf_entry['state']['admin-status'].lower() if intf_entry.get('state') and intf_entry['state'].get('admin-status') else '' + temp['mtu'] = str(intf_entry['state']['mtu']) if intf_entry.get('state') and intf_entry['state'].get('mtu') else '9100' + port_speed = intf_entry['openconfig-if-ethernet:ethernet']['state']['port-speed'] if intf_entry.get('openconfig-if-ethernet:ethernet') and intf_entry['openconfig-if-ethernet:ethernet'].get('state') and intf_entry['openconfig-if-ethernet:ethernet']['state'].get('port-speed') else '' temp['speed'] = port_speed.replace('GB', '000').split(":")[1].replace('SPEED_', '') if port_speed else port_speed ret_val.append(temp) st.debug(ret_val) return ret_val - except Exception as e: - st.error("{} exception occurred".format(e)) - st.debug("Given data is:{}".format(data)) + else: + st.debug("Provided data: {}".format(data)) st.debug(ret_val) return ret_val -def process_intf_counters_rest_output(data, 
single_port=False): +def process_intf_counters_rest_output(data): """ Author: Jagadish Chatrasi (jagadish.chatrasi@broadcom.com) :param data: :type dict: - :param single_port: :type bool: :return: :rtype: """ - ret_val = [] - try: - if single_port: - actaul_data = data['output']['openconfig-interfaces:interface'] - else: - actaul_data = data['output']['openconfig-interfaces:interfaces']['interface'] - for intf_entry in actaul_data: + ret_val = list() + if isinstance(data, dict) and data.get('output') and data['output'].get('openconfig-interfaces:interface'): + actual_data = data['output']['openconfig-interfaces:interface'] + elif isinstance(data, dict) and data.get('output') and data['output'].get('openconfig-interfaces:interfaces') and data['output']['openconfig-interfaces:interfaces'].get('interface'): + actual_data = data['output']['openconfig-interfaces:interfaces']['interface'] + else: + st.error('interface counters GET data is not as per format') + st.debug('Provided data: {}'.format(data)) + return ret_val + if actual_data and isinstance(actual_data, list): + for intf_entry in actual_data: temp = {} - if any(port in intf_entry['name'].lower() for port in invalid_ports): + if intf_entry.get('name') and any(port in intf_entry['name'].lower() for port in invalid_ports): continue - temp['iface'] = intf_entry['name'] - temp['state'] = intf_entry['state']['oper-status'].upper()[0] - temp['rx_ok'] = intf_entry['state']['counters']['in-pkts'] - temp['rx_err'] = intf_entry['state']['counters']['in-errors'] - temp['rx_drp'] = intf_entry['state']['counters']['in-discards'] - temp['tx_ok'] = intf_entry['state']['counters']['out-pkts'] - temp['tx_err'] = intf_entry['state']['counters']['out-errors'] - temp['tx_drp'] = intf_entry['state']['counters']['out-discards'] - if 'PortChannel' not in intf_entry['name']: ##RX_OVERSIZE and TX_OVERSIZE counters are not available for LAG, reported SONIC-32454 - temp['rx_ovr'] = 
intf_entry['openconfig-if-ethernet:ethernet']['state']['counters']['in-oversize-frames'] - temp['tx_ovr'] = intf_entry['openconfig-if-ethernet:ethernet']['state']['counters']['openconfig-interfaces-ext:out-oversize-frames'] + temp['iface'] = intf_entry.get('name', '') + temp['state'] = intf_entry['state']['oper-status'].upper()[0] if intf_entry.get('state') and intf_entry['state'].get('oper-status') and len(intf_entry['state']['oper-status']) >=1 else '' + if intf_entry.get('state') and intf_entry['state'].get('counters'): + counters_data = intf_entry['state']['counters'] + temp['rx_ok'] = str(counters_data.get('in-pkts', '0')) + temp['rx_err'] = str(counters_data.get('in-errors', '0')) + temp['rx_drp'] = str(counters_data.get('in-discards', '0')) + temp['tx_ok'] = str(counters_data.get('out-pkts', '0')) + temp['tx_err'] = str(counters_data.get('out-errors', '0')) + temp['tx_drp'] = str(counters_data.get('out-discards', '0')) + if intf_entry.get('openconfig-if-ethernet:ethernet') and intf_entry['openconfig-if-ethernet:ethernet'].get('state') and intf_entry['openconfig-if-ethernet:ethernet']['state'].get('counters'): + counters_data = intf_entry['openconfig-if-ethernet:ethernet']['state']['counters'] + temp['rx_ovr'] = str(counters_data.get('in-oversize-frames', '0')) + temp['tx_ovr'] = str(counters_data.get('openconfig-interfaces-ext:out-oversize-frames', '0')) ret_val.append(temp) st.debug(ret_val) return ret_val - except Exception as e: - st.error("{} exception occurred".format(e)) - st.debug("Given data is:{}".format(data)) + else: + st.debug('Provided data: {}'.format(data)) st.debug(ret_val) return ret_val @@ -96,23 +102,29 @@ def rest_get_queue_counters(dut, port): ret_val = list() rest_urls = st.get_datastore(dut, 'rest_urls') url = rest_urls['queue_counters_get'].format(port) - try: - get_info = get_rest(dut, rest_url=url, timeout=60) - for entry in get_info['output']['openconfig-qos:queues']['queue']: + get_info = get_rest(dut, rest_url=url, timeout=60) + 
if isinstance(get_info, dict) and get_info.get('output') and isinstance(get_info['output'], dict) and get_info['output'].get('openconfig-qos:queues') and get_info['output']['openconfig-qos:queues'].get('queue'): + actual_data = get_info['output']['openconfig-qos:queues']['queue'] + else: + st.error('queue counters GET data is not as per format') + st.debug('Provided data: {}'.format(get_info)) + return ret_val + if actual_data and isinstance(actual_data, list): + for entry in actual_data: temp = dict() - counters_info = entry['state'] - port, queue = counters_info['name'].split(':') - temp['port'] = port - temp['txq'] = "{}{}".format(counters_info["openconfig-qos-ext:traffic-type"], queue) - temp['pkts_drop'] = counters_info['dropped-pkts'] - temp['byte_drop'] = counters_info['openconfig-qos-ext:dropped-octets'] - temp['pkts_count'] = counters_info['transmit-pkts'] - temp['byte_count'] = counters_info['transmit-octets'] - ret_val.append(temp) + if isinstance(entry, dict) and entry.get('state') and isinstance(entry['state'], dict): + counters_info = entry['state'] + port, queue = counters_info['name'].split(':') if counters_info.get('name') and ':' in counters_info['name'] else ['', ''] + temp['port'] = port + temp['txq'] = '{}{}'.format(counters_info['openconfig-qos-ext:traffic-type'], queue) if counters_info.get('openconfig-qos-ext:traffic-type') else '{}{}'.format('NA', queue) + temp['pkts_drop'] = str(counters_info.get('dropped-pkts', '0')) + temp['byte_drop'] = str(counters_info.get('openconfig-qos-ext:dropped-octets', '0')) + temp['pkts_count'] = str(counters_info.get('transmit-pkts', '0')) + temp['byte_count'] = str(counters_info.get('transmit-octets', '0')) + ret_val.append(temp) st.debug(ret_val) return ret_val - except Exception as e: - st.error("{} exception occurred".format(e)) - st.log("The output is:{}".format(get_info)) + else: + st.debug('Provided data: {}'.format(get_info)) st.debug(ret_val) return ret_val diff --git 
a/spytest/apis/system/snapshot.py b/spytest/apis/system/snapshot.py index c2081305e55..bc66f2f21d0 100644 --- a/spytest/apis/system/snapshot.py +++ b/spytest/apis/system/snapshot.py @@ -2,7 +2,7 @@ #Author: prudviraj k (prudviraj.kristipati.@broadcom.com) from spytest import st -from utilities.common import filter_and_select, dicts_list_values,integer_parse +from utilities.common import filter_and_select, dicts_list_values,integer_parse, iterable from utilities.utils import get_dict_from_redis_cli, get_interface_number_from_name from apis.common import redis from apis.system.rest import config_rest, delete_rest, get_rest @@ -355,7 +355,7 @@ def load_json_config(dut, convert_json, config_file): def multicast_queue_start_value(dut, *argv, **kwargs): result = True output = show(dut, *argv, **kwargs) - for queue in output: + for queue in iterable(output): if queue.get('mc8'): result = True break diff --git a/spytest/apis/system/snmp.py b/spytest/apis/system/snmp.py index 13f060e93eb..42fc7b3d590 100644 --- a/spytest/apis/system/snmp.py +++ b/spytest/apis/system/snmp.py @@ -5,8 +5,9 @@ import re from spytest import st -from spytest.utils import filter_and_select + import utilities.utils as utils_obj +from utilities.common import filter_and_select, iterable from apis.system.basic import replace_line_in_file, service_operations from apis.system.connection import execute_command @@ -402,18 +403,6 @@ def walk_snmp_operation(**kwargs): st.error(output) return False -def get_snmp_memory(ipaddress,oid,community_name): - res=dict() - output=walk_snmp_operation(ipaddress=ipaddress, oid=oid, community_name=community_name,skip_tmpl=False) - if len(output) == 0: - st.error("Failed to get the memory data") - return False - for data in output: - out = re.findall(r"UCD-SNMP-MIB::mem(TotalReal|AvailReal|TotalFree|Cached).0 = INTEGER: (\d+) kB", data) - if out: - for i in out: - res[i[0]] = i[1] - return res def poll_for_snmp(dut, iteration_count=30, delay=1, **kwargs): """ @@ -743,7 
+732,7 @@ def config(dut, params): if not value.get("no_form"): command += " {}".format(value.get("name")) if value.get("group_name"): - command += " groupname {}".format(value.get("group_name")) + command += " group {}".format(value.get("group_name")) else: command = "no {} {}".format(command, value.get("name")) if command: @@ -995,6 +984,44 @@ def show(dut, **kwargs): return response +def show_snmp_counters(dut, **kwargs): + """ + Api to show snmp counters + :param dut: + :param kwargs: + :return: + """ + #cli_type = st.get_ui_type(dut, **kwargs) + command = "show snmp counters" + return st.show(dut, command, type="klish") + +def clear_snmp_counters(dut, **kwargs): + """ + Api to clear snmp counters + :param dut: + :param kwargs: + :return: + """ + #cli_type = kwargs.get("cli_type", "klish") + command = "clear snmp counters" + return st.config(dut, command, type="klish") + +def verify_snmp_counters(dut, **kwargs): + cli_type = kwargs.get("cli_type", "klish") + output = show_snmp_counters(dut, cli_type=cli_type) + if not output: + st.error("Couldn't get the output for snmp counters") + return False + map = kwargs.get('map', {}) + if not map: + st.error("Mandatory fields missing to verify the output for snmp counters") + return False + for key, value in map.items(): + if int(output[0][key]) != value: + st.error('Match not found for %s :: Expected: %s Actual : %s' % (key, value, output[0][key])) + return False + st.log('Match Found for %s :: Expected: %s Actual : %s' % (key, value, output[0][key])) + return True def verify(dut, **kwargs): """ @@ -1052,7 +1079,7 @@ def poll_for_snmp_walk_output(dut, iteration_count=5, delay=1, expected_output=" i = 1 while True: snmp_walk_out_put = walk_snmp_operation(**kwargs) - for match in snmp_walk_out_put: + for match in iterable(snmp_walk_out_put): if expected_output in match: st.log("Found expected output\n") return snmp_walk_out_put @@ -1077,7 +1104,7 @@ def poll_for_snmp_get_output(dut, iteration_count=5, delay=1, 
expected_output="" i = 1 while True: snmp_get_out_put = get_snmp_operation(**kwargs) - for match in snmp_get_out_put: + for match in iterable(snmp_get_out_put): if expected_output in match: st.log("Found expected output\n") return snmp_get_out_put @@ -1181,3 +1208,71 @@ def config_agentx(dut, config='yes', cli_type=''): cmd = config + ' agentx' st.config(dut, cmd, type=cli_type) +def report_set_snmp_operation_fail(msg): + #st.report_fail('snmp_operation_fail', 'SET', msg) + return False + +def set_snmp_operation(**kwargs): + """ + To perform SNMP SET operation + Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) + + :param :ipaddress: + :param :oid: + :param :community_name: + :param :timeout: + :param :object_name: + :return: + """ + community_name = kwargs.get("community_name") + ip_address = kwargs.get("ipaddress") + oid = kwargs.get("oid") + object_type = kwargs.get("objtype") + object_name = kwargs.get("objname") + version = kwargs.get("version", "2") + filter = kwargs.get("filter", "-Oqv") + + command = 'snmpset' + if version not in ["1", "2"]: + st.log("Unsupported version provided") + return False + if not ip_address or not oid: + st.log("Mandatory parameters like ipaddress or/and oid not passed") + return False + if version in ["1", "2"]: + if not community_name: + st.log("Mandatory parameter community_name not passed") + return False + act_version = "1" if version == "1" else "2c" + snmp_command = "{} {} -v {} -c {} {} {}".format(command, filter, act_version, community_name, + ip_address, oid) + if object_name: + snmp_command = "{} {} -v {} -c {} {} {} {} '{}'".format(command, filter, act_version, community_name, ip_address, + oid, object_type, object_name) + st.log("snmp command:{}".format(snmp_command)) + pprocess = subprocess.Popen(snmp_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) + (stdout, stderr) = pprocess.communicate() + st.log("SNMP stdout: {}".format(stdout)) + if pprocess.poll() is not None: 
+ if pprocess.returncode == 0 \ + and "No Such Object available on this agent at this OID" not in stdout \ + and "No Such Instance currently exists at this OID" not in stdout \ + and "No more" not in stdout: + result = stdout.rstrip('\n').split("\n") + result1 = [each.replace('"', '') for each in result] + return result1 + elif "Timeout" in stderr: + st.error("SNMP Timeout occurs") + return report_set_snmp_operation_fail('Timeout') + elif "No Such Instance currently exists at this OID" in stdout: + result = stderr.strip("\n") + st.error(result) + return report_set_snmp_operation_fail('No Instance Found') + else: + st.log("SNMP Error: return code = {}".format(pprocess.returncode)) + st.log("SNMP stdout: {}".format(stdout)) + st.error("SNMP stderr: {}".format(stderr)) + return report_set_snmp_operation_fail('Error') + + return True diff --git a/spytest/bin/lint.sh b/spytest/bin/lint.sh index fd922a75ff6..0414f4b9848 100755 --- a/spytest/bin/lint.sh +++ b/spytest/bin/lint.sh @@ -100,7 +100,7 @@ if [ "$LINT_DAILY" == "1" ]; then REPORT=daily_lint_report.log ERR_FILE=daily_lint_errors.log #IGNORE2="$IGNORE2 --disable=W0611" #unused-import - IGNORE2="$IGNORE2 --disable=W0612" #unused-variable + #IGNORE2="$IGNORE2 --disable=W0612" #unused-variable IGNORE2="$IGNORE2 --disable=W0106" #expression-not-assigned exclude="__init__.py ddm/ tests/ut/ tests/systb/ scheduler/ tests/dell" fi @@ -150,6 +150,7 @@ rm -f $REPORT $ERR_FILE $ERR_TEMP line="\--------------------------------------------------------------------" score="Your code has been rated at 10.00" using="Using config file " +date | tee -a $REPORT | tee -a $ERR_FILE for f in $files2;do if [ -n "$FLAKE8" ]; then echo ================== FLAKES8 $f | tee -a $REPORT diff --git a/spytest/bin/tgen_folders.txt b/spytest/bin/tgen_folders.txt index c06e32ebc56..932f5c5771d 100644 --- a/spytest/bin/tgen_folders.txt +++ b/spytest/bin/tgen_folders.txt @@ -43,7 +43,6 @@ 
/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/hltapiPythonWrapper /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/hltapiForPython -/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/hltapiForPython/__pycache__ /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/sqllibraries /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/sqllibraries/SunOS /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI/SourceCode/sqllibraries/Linux @@ -146,6 +145,26 @@ /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/STAKCommands/spirent/datacenter/oseUnitTestConfig /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/STAKCommands/spirent/datacenter/Core /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/STAKCommands/spirent/datacenter/Core/Utils +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91 +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiPythonWrapper +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiForPython +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiForPython/__pycache__ +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries/SunOS +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries/Linux +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/sqllibraries/Windows 
+/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiForPerl +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/model +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiPerlWrapper +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/hltapiWrapper +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/tools +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SourceCode/tools/iTest +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SampleScripts +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SampleScripts/hltapi +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SampleScripts/hltapiForPython +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SampleScripts/hltapiForPerl +/projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/HltAPI_4.91/SampleScripts/hltapiForRobot /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/Utilities /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/Utilities/Results /projects/scid/tgen/stc/4.91/Spirent_TestCenter_Application_Linux/Utilities/Results/lib diff --git a/spytest/datastore/constants/accton-as4630-54pe.yaml b/spytest/datastore/constants/accton-as4630-54pe.yaml index 19b5377db93..d63de8f5b42 100644 --- a/spytest/datastore/constants/accton-as4630-54pe.yaml +++ b/spytest/datastore/constants/accton-as4630-54pe.yaml @@ -1 +1,11 @@ -WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" \ No newline at end of file +WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, 
+'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/accton-as5712-54x.yaml b/spytest/datastore/constants/accton-as5712-54x.yaml index 18bdc67213d..822691f63c8 100644 --- a/spytest/datastore/constants/accton-as5712-54x.yaml +++ b/spytest/datastore/constants/accton-as5712-54x.yaml @@ -47,3 +47,13 @@ MAX_IPV4_ROUTES_SUPPORTED: '200704' MAX_IPV6_ROUTES_SUPPORTED: '32768' MAX_IPV4_ROUTES_FOR_ERROR_HANDLING: '128000' MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '128000' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 8, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 4, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/accton-as7326-56x.yaml b/spytest/datastore/constants/accton-as7326-56x.yaml index 2b8ef861985..e0d613822fe 100644 --- a/spytest/datastore/constants/accton-as7326-56x.yaml +++ b/spytest/datastore/constants/accton-as7326-56x.yaml @@ -48,3 +48,13 @@ MAX_IPV4_ROUTES_SUPPORTED: '82944' MAX_IPV6_ROUTES_SUPPORTED: '32768' MAX_IPV4_ROUTES_FOR_ERROR_HANDLING: '128000' MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '128000' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/accton-as7712-32x.yaml b/spytest/datastore/constants/accton-as7712-32x.yaml index 9497c55acc5..05a6a62d8be 100644 --- a/spytest/datastore/constants/accton-as7712-32x.yaml +++ b/spytest/datastore/constants/accton-as7712-32x.yaml @@ -83,3 +83,13 @@ PDDF_THERMAL_LIST: - 'Temp_3' - 'Temp_CPU' Manufacturer: "accton" +ACL_SCALE: { 
+'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/accton-as7726-32x.yaml b/spytest/datastore/constants/accton-as7726-32x.yaml index 3ced1d38853..87b57d0f4ee 100644 --- a/spytest/datastore/constants/accton-as7726-32x.yaml +++ b/spytest/datastore/constants/accton-as7726-32x.yaml @@ -47,4 +47,14 @@ WRED_ECN : "ecn_all" INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" -EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/accton-as7816-64x.yaml b/spytest/datastore/constants/accton-as7816-64x.yaml index 60bd70d4479..e98b3f20d08 100644 --- a/spytest/datastore/constants/accton-as7816-64x.yaml +++ b/spytest/datastore/constants/accton-as7816-64x.yaml @@ -60,3 +60,13 @@ MAX_IPV4_ROUTES_SUPPORTED: '200704' MAX_IPV6_ROUTES_SUPPORTED: '32768' MAX_IPV4_ROUTES_FOR_ERROR_HANDLING: '128000' MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '128000' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git 
a/spytest/datastore/constants/accton-as9716-32d.yaml b/spytest/datastore/constants/accton-as9716-32d.yaml index 0989595029b..803ef75e917 100644 --- a/spytest/datastore/constants/accton-as9716-32d.yaml +++ b/spytest/datastore/constants/accton-as9716-32d.yaml @@ -43,4 +43,14 @@ EGRESS_LOSSLESS_PROFILE : 'egress_lossy_profile' MAX_IPV4_ROUTES_SUPPORTED: '200704' MAX_IPV6_ROUTES_SUPPORTED: '32768' MAX_IPV4_ROUTES_FOR_ERROR_HANDLING: '270000' -MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '270000' \ No newline at end of file +MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '270000' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 128, +'V6_EGRESS_ACL_RULES': 128, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 2 +} \ No newline at end of file diff --git a/spytest/datastore/constants/common.yaml b/spytest/datastore/constants/common.yaml index ec13804f613..a6b238ed16c 100644 --- a/spytest/datastore/constants/common.yaml +++ b/spytest/datastore/constants/common.yaml @@ -10,6 +10,7 @@ QUANTA_IX8_56X : &IX8 quanta-ix8-56x QUANTA-IX8A-BWDE-56X: &IX8A quanta-ix8a-bwde-56x QUANTA_IX9_32X : &IX9 quanta-ix9-32x QUANTA_IX7_32X : &IX7 quanta-ix7-32x +QUANTA-IX7-bwde-32X : &IX7_BWDE quanta-ix7-bwde-32x DELLEMC_Z9332F_O32 : &Z9332f_O32 dellemc-z9332f-o32 ACCTON_AS7726_32X : &AS7726 accton-as7726-32x QUANTA-IX4-64X : &IX4 quanta-ix4-64x @@ -37,6 +38,7 @@ TD3_PLATFORMS : - *AS7326 - *IX8 - *IX7 + - *IX7_BWDE - *IX8A - *AS7726 - *AS4630 @@ -93,6 +95,7 @@ WARM_REBOOT_SUPPORTED_PLATFORMS : - *S5232f_P_25G - *IX9 - *IX7 + - *IX7_BWDE - *Z9332f_O32 - *AS9716 - *AS5835 @@ -147,3 +150,4 @@ HW_WATCHDOG_SUPPORTED_PLATFORMS: - *IX9 HW_WATCHDOG_REBOOT_CAUSE_SUPPORTED_PLATFORMS: - *AS4630 + - *AS5835 \ No newline at end of file diff --git a/spytest/datastore/constants/constants.yaml b/spytest/datastore/constants/constants.yaml index a8f4462e521..45068d44fc8 100644 --- 
a/spytest/datastore/constants/constants.yaml +++ b/spytest/datastore/constants/constants.yaml @@ -20,6 +20,7 @@ quanta-ix8-56x: !include quanta-ix8-56x.yaml quanta-ix8a-bwde-56x: !include quanta-ix8a-bwde-56x.yaml quanta-ix9-32x: !include quanta-ix9-32x.yaml quanta-ix7-32x: !include quanta-ix7-32x.yaml +quanta-ix7-bwde-32x: !include quanta-ix7-bwde-32x.yaml quanta-ix9-c64o16: !include quanta-ix9-c64o16.yaml accton-as5835-54x: !include accton-as5835-54x.yaml @@ -45,5 +46,6 @@ constants: quanta-ix8a-bwde-56x: [common, quanta-ix8a-bwde-56x] quanta-ix9-32x: [common, quanta-ix9-32x] quanta-ix7-32x: [common, quanta-ix7-32x] + quanta-ix7-bwde-32x: [common, quanta-ix7-bwde-32x] quanta-ix9-c64o16: [common, quanta-ix9-c64o16] accton-as5835-54x: [common, accton-as5835-54x] diff --git a/spytest/datastore/constants/dellemc-s5232f-c32.yaml b/spytest/datastore/constants/dellemc-s5232f-c32.yaml index 3ced1d38853..87b57d0f4ee 100644 --- a/spytest/datastore/constants/dellemc-s5232f-c32.yaml +++ b/spytest/datastore/constants/dellemc-s5232f-c32.yaml @@ -47,4 +47,14 @@ WRED_ECN : "ecn_all" INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" -EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/dellemc-s5232f-p-100g.yaml b/spytest/datastore/constants/dellemc-s5232f-p-100g.yaml index 3ced1d38853..87b57d0f4ee 100644 --- a/spytest/datastore/constants/dellemc-s5232f-p-100g.yaml +++ b/spytest/datastore/constants/dellemc-s5232f-p-100g.yaml @@ -47,4 +47,14 @@ WRED_ECN 
: "ecn_all" INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" -EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/dellemc-s5232f-p-10g.yaml b/spytest/datastore/constants/dellemc-s5232f-p-10g.yaml index 3ced1d38853..87b57d0f4ee 100644 --- a/spytest/datastore/constants/dellemc-s5232f-p-10g.yaml +++ b/spytest/datastore/constants/dellemc-s5232f-p-10g.yaml @@ -47,4 +47,14 @@ WRED_ECN : "ecn_all" INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" -EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/dellemc-s5232f-p-25g.yaml b/spytest/datastore/constants/dellemc-s5232f-p-25g.yaml index 3ced1d38853..87b57d0f4ee 100644 --- a/spytest/datastore/constants/dellemc-s5232f-p-25g.yaml +++ b/spytest/datastore/constants/dellemc-s5232f-p-25g.yaml @@ -47,4 +47,14 @@ WRED_ECN : "ecn_all" INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : 
"egress_lossy_profile" -EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/dellemc-z9264f-c64.yaml b/spytest/datastore/constants/dellemc-z9264f-c64.yaml index 19b5377db93..904623f21e2 100644 --- a/spytest/datastore/constants/dellemc-z9264f-c64.yaml +++ b/spytest/datastore/constants/dellemc-z9264f-c64.yaml @@ -1 +1,11 @@ -WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" \ No newline at end of file +WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/dellemc-z9332f-o32.yaml b/spytest/datastore/constants/dellemc-z9332f-o32.yaml index f669dbb1c1a..fe5b7427baf 100644 --- a/spytest/datastore/constants/dellemc-z9332f-o32.yaml +++ b/spytest/datastore/constants/dellemc-z9332f-o32.yaml @@ -45,3 +45,13 @@ INGRESS_LOSSY_PROFILE : "ingress_lossy_profile" INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_40m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" EGRESS_LOSSLESS_PROFILE : "egress_lossy_profile" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 128, +'V6_EGRESS_ACL_RULES': 128, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 2 +} \ No newline at end of file diff --git 
a/spytest/datastore/constants/delta-ag9032v1.yaml b/spytest/datastore/constants/delta-ag9032v1.yaml index 1f4d9e5105f..b33985c541a 100644 --- a/spytest/datastore/constants/delta-ag9032v1.yaml +++ b/spytest/datastore/constants/delta-ag9032v1.yaml @@ -63,3 +63,13 @@ INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix1b-32x.yaml b/spytest/datastore/constants/quanta-ix1b-32x.yaml index 121af07d82a..26eda9ac1ee 100644 --- a/spytest/datastore/constants/quanta-ix1b-32x.yaml +++ b/spytest/datastore/constants/quanta-ix1b-32x.yaml @@ -2,3 +2,13 @@ MAX_IPV4_ROUTES_SUPPORTED: '66560' MAX_IPV6_ROUTES_SUPPORTED: '24576' MAX_IPV4_ROUTES_FOR_ERROR_HANDLING: '128000' MAX_IPV6_ROUTES_FOR_ERROR_HANDLING: '128000' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix4-64x.yaml b/spytest/datastore/constants/quanta-ix4-64x.yaml index e85783f7d9e..f4310eb2d0e 100644 --- a/spytest/datastore/constants/quanta-ix4-64x.yaml +++ b/spytest/datastore/constants/quanta-ix4-64x.yaml @@ -61,3 +61,13 @@ INGRESS_LOSSLESS_PROFILE : "pg_lossless_10000_300m_profile" EGRESS_LOSSY_PROFILE : "egress_lossy_profile" EGRESS_LOSSLESS_PROFILE : "egress_lossless_profile" WRED_LOSSLESS_PROFILE : 
"AZURE_LOSSLESS" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 256, +'V6_EGRESS_ACL_RULES': 256, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix7-32x.yaml b/spytest/datastore/constants/quanta-ix7-32x.yaml index e69de29bb2d..28c051048cc 100644 --- a/spytest/datastore/constants/quanta-ix7-32x.yaml +++ b/spytest/datastore/constants/quanta-ix7-32x.yaml @@ -0,0 +1,10 @@ +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix7-bwde-32x.yaml b/spytest/datastore/constants/quanta-ix7-bwde-32x.yaml new file mode 100644 index 00000000000..28c051048cc --- /dev/null +++ b/spytest/datastore/constants/quanta-ix7-bwde-32x.yaml @@ -0,0 +1,10 @@ +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix8-56x.yaml b/spytest/datastore/constants/quanta-ix8-56x.yaml index d77f8aba01f..abd48d1c46e 100644 --- a/spytest/datastore/constants/quanta-ix8-56x.yaml +++ b/spytest/datastore/constants/quanta-ix8-56x.yaml @@ -73,4 +73,14 @@ PDDF_THERMAL_LIST: - 'Temp_Ambient_5' - 'Temp_Ambient_6' - 'Temp_CPU' -Manufacturer: "QSMC" \ No newline at end of file +Manufacturer: "QSMC" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, 
+'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix8a-bwde-56x.yaml b/spytest/datastore/constants/quanta-ix8a-bwde-56x.yaml index 19b5377db93..d63de8f5b42 100644 --- a/spytest/datastore/constants/quanta-ix8a-bwde-56x.yaml +++ b/spytest/datastore/constants/quanta-ix8a-bwde-56x.yaml @@ -1 +1,11 @@ -WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" \ No newline at end of file +WRED_LOSSLESS_PROFILE : "AZURE_LOSSLESS" +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 768, +'V6_INGRESS_ACL_RULES': 768, +'V4_EGRESS_ACL_RULES': 512, +'V6_EGRESS_ACL_RULES': 512, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 3, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 1, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 1 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix9-32x.yaml b/spytest/datastore/constants/quanta-ix9-32x.yaml index 2381b8b6c2d..ec44bc17c70 100644 --- a/spytest/datastore/constants/quanta-ix9-32x.yaml +++ b/spytest/datastore/constants/quanta-ix9-32x.yaml @@ -44,4 +44,14 @@ WRED_ECN : 'ecn_all' INGRESS_LOSSY_PROFILE : 'ingress_lossy_profile' INGRESS_LOSSLESS_PROFILE : 'pg_lossless_10000_40m_profile' EGRESS_LOSSY_PROFILE : 'egress_lossy_profile' -EGRESS_LOSSLESS_PROFILE : 'egress_lossy_profile' \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : 'egress_lossy_profile' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 128, +'V6_EGRESS_ACL_RULES': 128, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 2 +} \ No newline at end of file diff --git a/spytest/datastore/constants/quanta-ix9-c64o16.yaml b/spytest/datastore/constants/quanta-ix9-c64o16.yaml index db935941812..8ad6d2182ee 
100644 --- a/spytest/datastore/constants/quanta-ix9-c64o16.yaml +++ b/spytest/datastore/constants/quanta-ix9-c64o16.yaml @@ -40,4 +40,14 @@ WRED_ECN : 'ecn_all' INGRESS_LOSSY_PROFILE : 'ingress_lossy_profile' INGRESS_LOSSLESS_PROFILE : 'pg_lossless_10000_40m_profile' EGRESS_LOSSY_PROFILE : 'egress_lossy_profile' -EGRESS_LOSSLESS_PROFILE : 'egress_lossy_profile' \ No newline at end of file +EGRESS_LOSSLESS_PROFILE : 'egress_lossy_profile' +ACL_SCALE: { +'V4_INGRESS_ACL_RULES': 256, +'V6_INGRESS_ACL_RULES': 256, +'V4_EGRESS_ACL_RULES': 128, +'V6_EGRESS_ACL_RULES': 128, +'V4_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_INGRESS_ACL_ON_AN_INTERFACE': 2, +'V4_EGRESS_ACL_ON_AN_INTERFACE': 2, +'V6_EGRESS_ACL_ON_AN_INTERFACE': 2 +} \ No newline at end of file diff --git a/spytest/datastore/messages/routing.yaml b/spytest/datastore/messages/routing.yaml index 2ec4dffd30c..06fa000152c 100644 --- a/spytest/datastore/messages/routing.yaml +++ b/spytest/datastore/messages/routing.yaml @@ -226,3 +226,5 @@ All_configured_servers_received_packets : Test case passed. All configured helpe IP_helper_service_is_not_up : Test case failed, IP helper is not up Vrf_Config_verification_failed : VRF config verification failed IP_helper_test_case_msg_status : Test case {} {} + +accessing_shell_status: Accessing {} shell after enable/disable bgp error handling {} \ No newline at end of file diff --git a/spytest/datastore/messages/system.yaml b/spytest/datastore/messages/system.yaml index c973ce9f989..593c8ef4ca3 100644 --- a/spytest/datastore/messages/system.yaml +++ b/spytest/datastore/messages/system.yaml @@ -152,6 +152,7 @@ snmp_trap_informs_status: snmp trap informs status of {} are {} snmp_community_invalid_error_code: Observed invalid error code {} when snmp community - {} is configured via rest. snmp_community_config_gnmi_unsuccessful: Failed to {} snmp community - {} via gnmi.
snmp_agent_address_config_verify_unsuccessful: Failed to configure snmp agent addresses - {} with non-default port - {} +snmp_counter_not_incremented: SNMP counter value not incremented though simulated ##PMON pmon_service_not_up : PMON service is not up diff --git a/spytest/reporting/suites/community-ixia b/spytest/reporting/suites/community-ixia old mode 100755 new mode 100644 index 5e56c731b37..09c576be423 --- a/spytest/reporting/suites/community-ixia +++ b/spytest/reporting/suites/community-ixia @@ -1,14 +1,154 @@ -# Modules +#InfraModuleNeededforNodeDownScenarios ++file:batch/test_spytest_infra_1.py + +# Upstream - spytest 2.0 and legacy +file:qos/test_qos_save_reboot.py ++file:routing/BGP/test_bgp.py ++file:routing/BGP/test_bgp_4node.py ++file:routing/test_arp.py ++file:routing/test_ip.py ++file:sanity/test_sanity_l2.py ++file:security/test_tacacs.py ++file:switching/test_portchannel.py +file:system/crm/test_crm.py ++file:system/test_box_services.py ++file:system/test_device_mgmt.py ++file:system/test_interface.py ++file:system/test_lldp.py ++file:system/test_ntp.py ++file:system/test_snmp.py ++file:system/test_ssh.py + +# Upstream - spytest 2.1 and legacy ++file:qos/acl/test_acls_l3_forwarding.py ++file:qos/test_cos.py ++file:qos/test_wred.py ++file:routing/BGP/test_bgp_rr_traffic.py ++file:routing/NAT/test_nat.py ++file:routing/NAT/test_nat_reboot_long_run.py ++file:routing/NAT/test_nat_tcp.py ++file:routing/VRF/test_vrf.py ++file:routing/VRF/test_vrf_scale.py ++file:routing/test_arp_static_route_long_run.py ++file:routing/test_l3_performance.py ++file:routing/test_ndp.py ++file:sanity/test_sanity_l3.py ++file:switching/test_vlan.py ++file:system/test_sflow.py ++file:system/test_snapshot.py ++file:system/threshold/test_threshold.py + +#BUM Unsupported +-test:test_ft_stormcontrol_verification +-test:test_ft_stormcontrol_portchannel_intf +-test:test_ft_stormcontrol_incremental_bps_max_vlan +-test:test_ft_stormcontrol_fast_reboot 
+-test:test_ft_stormcontrol_warm_reboot +-test:test_base_line_vlan_create_delete_and_mac_learning_with_bum +-test:test_base_line_mac_move_single_vlan +-test:test_base_line_mac_move_across_vlans +-test:test_ft_lacp_graceful_restart_with_cold_boot +-test:test_ft_lacp_graceful_restart_with_save_reload +-test:test_ft_copp_igmp + +#STP +-test:test_stp_mac_perf +-test:test_stp_mac_move +-test:test_FtOpSoSwL2FwdFnCeta31752 + +#KLISH Specific +-test:test_ft_apply_shaper_multilple_ports +-test:test_nt_apply_invalid_queue_shaper_to_interface +-test:test_nt_port_pir_greater_than_speed +-test:test_nt_queue_pir_greater_than_speed +-test:test_nt_queue_cir_gt_eq_pir +-test:test_nt_attach_policy_invalid_interface +-test:test_nt_attach_invalid_shaper +-test:test_nt_delete_active_shaper_profile +-test:test_nt_policy_name_string_max +-test:test_ft_change_port_queue_shaper_rate_all_params_on_fly +-test:test_ft_change_port_queue_shaper_rate_on_fly +-test:test_ft_change_queue_shaper_rate_on_fly +-test:test_ft_change_port_shaper_rate_on_fly +-test:test_l2fwd_mac_move_dampening +-test:test_reload_with_mac_dampening +-test:test_ft_sf_all_buffer_stats_using_unicast_traffic +-test:test_ft_snmp_basic_counters +-test:test_ft_snmp_trap_counter +-test:test_ft_snmp_counter_negative_tests +-test:test_ft_pfc_sym + +#Rest/GNMI +-test:test_ft_sflow_rest_conf +-test:test_ft_sflow_gnmi +-test:test_mgmt_vrf_gnmi +-test:test_ft_dynamic_nat_warmboot +-test:test_ft_sflow_collector_port_poch + +#ERSPAN +-test:test_ft_mirroring_erspan +-test:test_ft_mirroring_erspan_span_check +-test:test_ft_mirroring_erspan_port +-test:test_ft_mirroring_erspan_port_chnl +-test:test_ft_mirroring_erspan_vlan +-test:test_ft_mirroring_max_session +-test:test_ft_mirroring_gnmi +-test:test_ft_mirroring_on_vlan_rx + +#PortChannel +-test:test_ft_member_state_after_interchanged_the_members_across_portchannels + +#BFD +-test:test_ft_bgp_unnumbered_bfd + +#SNMP MIB +-test:test_ft_snmp_if_mib_all +-test:test_ft_snmp_entity_mib_all 
+-test:test_ft_snmp_entity_sensor_mib +-test:test_ft_snmp_dot1q_dot1db_mib +-test:test_ft_snmp_dot1q_dot1db_mib +-test:test_ft_snmp_ipcidr_route_table +-test:test_ft_snmp_ifx_table +-test:test_ft_snmp_ent_physical_table +-test:test_ft_snmp_ent_phy_sensor_table +-test:test_ft_snmp_dot1d_base_bridge_address +-test:test_ft_snmp_dot1d_base_num_ports +-test:test_ft_snmp_dot1d_base_type +-test:test_ft_snmp_dot1d_base_port +-test:test_ft_snmp_dot1d_base_port_ifindex +-test:test_ft_snmp_dot1d_base_port_delay_exceeded_discards +-test:test_ft_snmp_dot1d_base_port_mtu_exceeded_discards +-test:test_ft_snmp_dot1d_tp_aging_time +-test:test_ft_snmp_dot1q_fdb_dynamic_count +-test:test_ft_snmp_dot1q_tp_fdb_port +-test:test_ft_snmp_dot1q_tp_fdb_status +-test:test_ft_snmp_dot1q_vlan_current_egress_ports +-test:test_ft_snmp_dot1q_vlan_current_untagged_ports +-test:test_ft_snmp_dot1q_vlan_static_untagged_ports +-test:test_ft_snmp_dot1q_vlan_static_row_status +-test:test_ft_snmp_dot1q_pvid +-test:test_ft_snmp_dot1q_vlan_static_name +-test:test_ft_snmp_dot1q_vlan_static_egress_ports +-test:test_ft_snmp_dot1q_vlan_version_number +-test:test_ft_snmp_dot1q_max_vlanid +-test:test_ft_snmp_dot1q_max_supported_vlans +-test:test_ft_snmp_dot1q_num_vlans +-test:test_ft_snmp_dot1q_vlan_num_deletes +-test:test_ft_snmp_vlan_static_table +-test:test_ft_snmp_dot1q_vlan_index +-test:test_ft_snmp_dot1q_tp_fdb_address +-test:test_ft_snmp_dot1q_fdb_table +-test:test_ft_snmp_link_down_trap +-test:test_ft_snmp_link_down_trap +-test:test_ft_snmp_link_up_trap +-test:test_ft_snmp_warmstart_trap +-test:test_ft_snmp_sysContact #Runtime Arguments ++args:--env SPYTEST_SHUTDOWN_FREE_PORTS 1 +args:--env SPYTEST_HOOKS_BREAKOUT_UITYPE click +args:--device-feature-group upstream +args:--module-init-max-timeout 9000 +args:--tc-max-timeout 5400 +args:--logs-level=debug -#InfraModuleNeededforNodeDownScenarios -+file:batch/test_spytest_infra_1.py - diff --git a/spytest/reporting/suites/community-legacy 
b/spytest/reporting/suites/community-legacy old mode 100755 new mode 100644 diff --git a/spytest/reporting/suites/community-ptf b/spytest/reporting/suites/community-ptf old mode 100755 new mode 100644 index 5e56c731b37..48ebc77ebbe --- a/spytest/reporting/suites/community-ptf +++ b/spytest/reporting/suites/community-ptf @@ -1,14 +1,30 @@ -# Modules -+file:qos/test_qos_save_reboot.py -+file:system/crm/test_crm.py + ++suite:community-ixia + +# test cases requiring large number of hosts +-file:routing/L3PerfSclEnhancement/test_L3SclEnh_UserVrf.py +-file:routing/L3PerfSclEnhancement/test_L3SclEnh_DefVrf_IP.py + +# test cases requiring link state propagation +-test:test_ft_arp_entry_link_failure + +# test cases that need line rate traffic +-test:test_ft_tf_queue_thre_con_multicast +-test:test_ft_tf_queue_thre_con_unicast +-test:test_ft_tf_pg_thre_con_shared + +# test cases that need high rate traffic +-test:test_ft_sflow_sampling_sFlow_collector_management_port +-test:test_ft_sflow_sampling_v4_sFlow_collector +-test:test_ft_sflow_sampling_v6_sFlow_collector +-test:test_ft_l3_performance_enhancements_v4_route_intstall_withdraw +-test:test_ft_l3_performance_enhancements_v4_bgp_session_failover_convergence_time #Runtime Arguments ++args:--env SPYTEST_SHUTDOWN_FREE_PORTS 1 +args:--env SPYTEST_HOOKS_BREAKOUT_UITYPE click +args:--device-feature-group upstream +args:--module-init-max-timeout 9000 +args:--tc-max-timeout 5400 +args:--logs-level=debug -#InfraModuleNeededforNodeDownScenarios -+file:batch/test_spytest_infra_1.py - diff --git a/spytest/reporting/tcmap.csv b/spytest/reporting/tcmap.csv index 62b0f7ae867..fd61a39082b 100644 --- a/spytest/reporting/tcmap.csv +++ b/spytest/reporting/tcmap.csv @@ -669,6 +669,7 @@ Arlo+,daily,Error Handling - Phase 1,ft_eh_clear_redis,test_ft_eh_redis_cli Arlo+,daily,Error Handling - Phase 1,ft_eh_show_redis,test_ft_eh_redis_cli Arlo+,daily,Error Handling - Phase 1,ft_eh_rt_bgp_plus_static_routes,test_ft_eh_rt_bgp_plus_static_routes 
Arlo+,daily,Error Handling - Phase 1,ft_eh_bgp_route_notify,test_ft_eh_bgp_route_notify +Arlo+,daily,Error Handling - Phase 1,ft_eh_cetasonic21955,test_ft_eh_cetasonic21955 Arlo+,daily,PFC,FtOpSoSysPFCFn022,test_ft_pfc_congestion_in_both_duts Arlo+,daily,PVST,ft_pvst_portpriority,test_ft_pvst_cost_priority Arlo+,daily,PVST,ft_pvst_portcost_in_single_instance,test_ft_pvst_cost_priority @@ -1373,6 +1374,9 @@ Buzznik,buzznik,SNMP MIB and Traps,SNMPOspfTr001,test_ft_snmp_ospf_bgp_trap Buzznik,buzznik,SNMP MIB and Traps,SNMPOspfTr002,test_ft_snmp_ospf_bgp_trap Buzznik,buzznik,SNMP MIB and Traps,SNMPBgpTr001,test_ft_snmp_ospf_bgp_trap Buzznik,buzznik,SNMP MIB and Traps,SNMPBgpTr002,test_ft_snmp_ospf_bgp_trap +Buzznik+,buzznikplusmr,SNMP Counters,test_ft_snmp_basic_counters,test_ft_snmp_basic_counters +Buzznik+,buzznikplusmr,SNMP Counters,test_ft_snmp_trap_counter,test_ft_snmp_trap_counter +Buzznik+,buzznikplusmr,SNMP Counters,test_ft_snmp_counter_negative_tests,test_ft_snmp_counter_negative_tests Buzznik,buzznik,RADIUS,FtOpSoScRaFn007,test_ft_security_config_mgmt_verifying_config_with_save_fast_reboot Buzznik,buzznik,RADIUS,FtOpSoScRaFn006,test_ft_security_config_mgmt_verifying_config_with_save_reboot Buzznik,buzznik,Port Mirroring on Port Channel and VLAN,FtOpSoSysPoPmFn002,test_ft_mirroring_on_port_both @@ -1584,6 +1588,8 @@ Buzznik,buzznik,ACL Rate Limiting,ft_acl_rate_limit_remove_policy,test_ft_acl_ra Buzznik,buzznik,ACL Rate Limiting,ft_acl_rate_limit_unbind_interface,test_ft_acl_rate_limit_unbind_interface Buzznik,buzznik,ACL Rate Limiting,ft_acl_rate_limit_shut_noshut_interface,test_ft_acl_rate_limit_shut_noshut_interface Buzznik,buzznik,ACL Rate Limiting,ft_acl_rate_limit_nonexist_policy,test_ft_acl_rate_limit_nonexist_policy +Buzznik+,buzznikplusmr,ACL Rate Limiting,test_ft_acl_l2_ipv4_ipv6,test_ft_acl_l2_ipv4_ipv6 +Buzznik+,buzznikplusmr,ACL Rate Limiting,test_ft_acl_l2_l3_mirroring,test_ft_acl_l2_l3_mirroring 
Buzznik,buzznik,Tailstamping,ft_tail_ts_behaviour,test_ft_tail_ts_functionality Buzznik,buzznik,Tailstamping,ft_tail_ts_port_mtu_config,test_ft_tail_ts_port_mtu_config Buzznik,buzznik,Tailstamping,ft_tail_ts_cpu_pkts_time_stamped,test_ft_tail_ts_functionality @@ -2728,7 +2734,6 @@ Buzznik+,buzznik,Regression,mirror_action_config_fast_reboot,test_ft_system_conf Buzznik+,buzznik,Regression,ft_tf_fast_reload,test_ft_system_config_mgmt_verifying_config_with_save_fast_reboot Buzznik+,buzznik,Regression,FtOpSoSysMTUCmFn003,test_ft_system_config_mgmt_verifying_config_with_save_fast_reboot Buzznik+,buzznik,Regression,ft_crm_fast_reload,test_ft_system_config_mgmt_verifying_config_with_save_fast_reboot -Buzznik+,buzznik,Regression,ft_ifa_save_reload,test_ft_system_config_mgmt_verifying_config_with_save_reboot_cloud_advance Buzznik+,buzznik,Regression,FtOpSoSyErspanFn021,test_ft_erspan_config_upload_save_reload_reboot Buzznik+,buzznik,Regression,FtOpSoSyErspanFn020,test_ft_erspan_portchannel_shut_noshut Buzznik+,buzznik,Regression,FtOpSoSyBsOc017_bmc,test_ft_pddf_bmc_debug_command @@ -2736,6 +2741,7 @@ Buzznik+,buzznikplusmr,Convergence,EVPN_L2BUM,test_convergence_l2_unknown Buzznik+,buzznikplusmr,Convergence,EVPN_L3Scale,test_convergence_l3_scale Buzznik+,buzznikplusmr,Convergence,EVPN_ECMP,test_convergence_ecmp Buzznik+,buzznikplusmr,Convergence,Underlay_ECMP,test_convergence_ecmp_underlay +Buzznik+,buzznikplusmr,Convergence,Reboot_convergence,test_reboot_convergence Buzznik+,buzznikplusmr,L2 VxLAN,test_FtOpSoRoEvpn5549Ft3221,test_FtOpSoRoEvpn5549Ft3221 Buzznik+,buzznikplusmr,L2 VxLAN,test_FtOpSoRoEvpn5549Ft3421,test_FtOpSoRoEvpn5549Ft3421 Buzznik+,buzznikplusmr,L2 VxLAN,test_FtOpSoRoEvpn5549Ft32227,test_FtOpSoRoEvpn5549Ft32227 @@ -2821,6 +2827,92 @@ Buzznik+,buzznikplusmr,L2 LVTEP,test_FtOpSoRoEvpnLvtepFtCeta32875,test_FtOpSoRoE Buzznik+,buzznikplusmr,Regression,ft_ceta_31902,test_Ceta_31902 Buzznik+,buzznikplusmr,Regression,ft_snmp_mib_memory,test_ft_snmp_mib_memory 
Buzznik+,buzznikplusmr,Regression,ft_netinstall_warm_reboot,test_ft_netinstall_warm_reboot -Buzznik+,buzznikplusmr,Regression,l2fwd_mac_move_dampening,test_l2fwd_mac_move_dampening -Buzznik+,buzznikplusmr,Regression,reload_with_mac_dampening,test_reload_with_mac_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,l2fwd_mac_move_dampening,test_l2fwd_mac_move_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,reload_with_mac_dampening,test_reload_with_mac_dampening Buzznik+,buzznikplusmr,Regression,ft_dynamic_nat_warmboot,test_ft_dynamic_nat_warmboot +Buzznik,buzznik,Regression,ft_snmp_ifXTable,test_ft_snmp_ifx_table +Buzznik,buzznik,Regression,ft_snmp_ip_System_Stats_Table,test_ft_snmp_ip_System_Stats_Table +Buzznik,buzznik,Regression,ft_snmp_ip_IfStats_Table,test_ft_snmp_ip_IfStats_Table +Buzznik,buzznik,Regression,ft_snmp_ip_Address_Table,test_ft_snmp_ip_Address_Table +Buzznik,buzznik,Regression,ft_snmp_ip_NetToPhysical_Table,test_ft_snmp_ip_NetToPhysical_Table +Buzznik,buzznik,Regression,ft_snmp_icmp_Msgs,test_ft_snmp_icmp_Msgs +Buzznik,buzznik,Regression,ft_snmp_tcp_mib,test_ft_snmp_tcp_mib +Buzznik,buzznik,Regression,ft_snmp_udp_mib,test_ft_snmp_udp_mib +Buzznik,buzznik,Regression,ft_snmp_snmpv2_mib,test_ft_snmp_snmpv2_mib +Buzznik,buzznik,Regression,ft_snmp_host_resource_mib,test_ft_snmp_host_resource_mib +Buzznik,buzznik,Regression,ft_snmp_framework_mib,test_ft_snmp_framework_mib +Buzznik,buzznik,Regression,ft_snmp_mpd_mib,test_ft_snmp_mpd_mib +Buzznik,buzznik,Regression,ft_snmp_target_mib,test_ft_snmp_target_mib +Buzznik,buzznik,Regression,ft_snmp_notification_mib,test_ft_snmp_notification_mib +Buzznik,buzznik,Regression,ft_snmp_user_based_sm_mib,test_ft_snmp_user_based_sm_mib +Buzznik,buzznik,Regression,ft_snmp_view_based_acm_mib,test_ft_snmp_view_based_acm_mib +Buzznik,buzznik,Regression,ft_snmp_ent_physical_table,test_ft_snmp_ent_physical_table +Buzznik,buzznik,Regression,ft_snmp_ent_phy_sensor_table,test_ft_snmp_ent_phy_sensor_table 
+Buzznik,buzznik,Regression,ft_snmp_dot3_stats_table,test_ft_snmp_dot3_stats_table +Buzznik,buzznik,Regression,ft_net_snmp_agent_mib,test_ft_net_snmp_agent_mib +Buzznik,buzznik,Regression,ft_net_snmp_vacm_mib,test_ft_net_snmp_vacm_mib +Buzznik,buzznik,Regression,ft_snmp_ucd_diskio_mib,test_ft_snmp_ucd_diskio_mib +Buzznik,buzznik,Regression,ft_snmp_ucd_memory,test_ft_snmp_ucd_memory +Buzznik,buzznik,Regression,ft_snmp_ucd_la_table,test_ft_snmp_ucd_la_table +Buzznik,buzznik,Regression,ft_snmp_ucd_system_stats,test_ft_snmp_ucd_system_stats +Buzznik,buzznik,Regression,ft_snmp_lldp_prem_manaddr_table,test_ft_lldp_rem_man_addr_table +Buzznik,buzznik,Regression,ft_snmp_ospf_mib,test_ft_snmp_ospf_bgp_mib +Buzznik,buzznik,Regression,ft_snmp_bgp4_mib,test_ft_snmp_ospf_bgp_mib +Buzznik,buzznik,Regression,ft_snmp_scaling01,test_ft_snmp_max_vlan_scale +Buzznik,buzznik,Regression,ft_snmp_scaling02,test_scale_vlans_macs +Buzznik,buzznik,Regression,ft_snmp_scaling03,test_L3Scl_vrf_006 +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_permit_any_any,test_acl_est_permit_any_any +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_on_the_fly,test_acl_est_on_the_fly +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_rst_flag_set,test_acl_est_rst_flag_set +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_specific_prefix,test_acl_est_specific_prefix +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_specific_port,test_acl_est_specific_port +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_permit_any_any,test_aclv6_est_permit_any_any +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_on_the_fly,test_aclv6_est_on_the_fly +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_rst_flag_set,test_aclv6_est_rst_flag_set +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_specific_prefix,test_aclv6_est_specific_prefix +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_specific_port,test_aclv6_est_specific_port +Buzznik+,buzznikplusmr,IP ACL 
established,FtOpSoRoacltcpflags01,test_acl_tcpflags +Buzznik+,buzznikplusmr,IP ACL established,FtOpSoRoacltcpflags02,test_acl_tcpflags +Buzznik+,buzznikplusmr,IP ACL established,FtOpSoRoacltcpflags03,test_acl_tcpflags_ipv6 +Buzznik+,buzznikplusmr,IP ACL established,FtOpSoRoacltcpflags04,test_acl_tcpflags_ipv6 +Buzznik+,buzznikplusmr,IP ACL established,FtOpSoRoacltcpflags05,test_acl_tcpflags_03 +Buzznik+,buzznikplusmr,IP ACL established,FtOpSoRoacltcpflags06,test_acl_tcpflags_03 +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_permit_any_any_egress,test_acl_est_permit_any_any_egress +Buzznik+,buzznikplusmr,IP ACL established,ft_acl_est_on_the_fly_egress,test_acl_est_on_the_fly_egress +Buzznik+,buzznikplusmr,IP ACL established,ft_aclv6_est_specific_egress_prefix,test_aclv6_est_specific_egress_prefix +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn041,test_ft_verify_ntp_sych +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn042,test_ft_verify_ntp_sych +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn056,test_verify_rm_add_route_leak_in_user_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn033,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn035,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn036,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn037,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn038,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn058,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn060,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn032,test_check_ntp_authetication +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn034,test_verify_ntp_synch_after_shut_no_shut 
+Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn029,test_verify_ntp_synch_after_save_reload +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn031,test_verify_ntp_synch_after_warm_boot +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn028,test_verify_ntp_synch_after_fast_boot +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn030,test_check_ntp_with_unreachable_server +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn026,test_verify_ntp_server_client_sync_when_eth0_and_client_in_default_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn052,test_verify_ntp_server_client_sync_when_eth0_and_client_in_default_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn053,test_verify_ntp_server_client_sync_when_eth0_and_client_in_default_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn054,test_verify_ntp_server_client_sync_when_eth0_and_client_in_default_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn040,test_verify_ntp_server_client_sync_when_port_channel_configured +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn048,test_verify_ntp_with_vrf_and_with_out_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn049,test_verify_ntp_with_vrf_and_with_out_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn050,test_verify_ntp_with_vrf_and_with_out_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn051,test_verify_ntp_with_vrf_and_with_out_vrf +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn039,test_verify_switch_over_ntp_servers_working +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn055,test_verify_switch_over_ntp_servers_working +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn044,test_verify_ntp_sych_v6 +Buzznik+,buzznikplusmr,NTP Server Authentication,FtOpSoSysNTPFn045,test_verify_ntp_sych_v6 +Buzznik+,buzznikplusmr,NTP Server 
Authentication,FtOpSoSysNTPFn046,test_verify_ntp_sych_v6 +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningDefaultThreshold,test_stp_mac_move +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningNonDefaultThreshold,test_l2fwd_mac_move_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningDisabledPortMacRelearn,test_l2fwd_mac_move_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningTimerProgression,test_l2fwd_mac_move_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningChangePortVlan,test_reload_with_mac_dampening +Buzznik+,buzznikplusmr,Mac Move Dampening,FtOpSoSwMacDampeningReload,test_reload_with_mac_dampening diff --git a/spytest/spytest/batch.py b/spytest/spytest/batch.py index 2f165324ae6..41e3a747a9a 100644 --- a/spytest/spytest/batch.py +++ b/spytest/spytest/batch.py @@ -79,7 +79,8 @@ def load_module_csv(): # append augmented lines for line in wa.augment_modules_csv: - for row in csv.reader(line): + line2 = " ".join(utils.make_list(line)) + for row in csv.reader([line2]): rows.append(row) # parse the rows @@ -1280,7 +1281,7 @@ def parse_args(numprocesses, buckets_csv, logs_path, augment_modules_csv): for i,part in enumerate(parts): os.environ["SPYTEST_TESTBED_FILE_gw{}".format(i)] = part os.environ["SPYTEST_TESTBED_FILE"] = parts[0] - if numprocesses or count > 1: + if numprocesses or count > 1 or buckets_csv: os.environ["SPYTEST_BATCH_RUN"] = "1" numprocesses2 = len(wa.slaves) or 1 numprocesses = numprocesses or 0 diff --git a/spytest/spytest/env.py b/spytest/spytest/env.py index 4a40ad4cbbf..a7bc687e22e 100644 --- a/spytest/spytest/env.py +++ b/spytest/spytest/env.py @@ -73,6 +73,9 @@ "SPYTEST_BGP_API_UITYPE": "", "SPYTEST_BGP_CFG_API_UITYPE": "", "SPYTEST_BGP_SHOW_API_UITYPE": "", + "SPYTEST_RECOVERY_CTRL_C": "1", + "SPYTEST_RECOVERY_CTRL_Q": "1", + "SPYTEST_SOFT_TGEN_WAIT_MULTIPLIER": "2", } dev_defaults = { diff --git a/spytest/spytest/feature.py 
b/spytest/spytest/feature.py index 77f6c7580b2..393d81e298b 100644 --- a/spytest/spytest/feature.py +++ b/spytest/spytest/feature.py @@ -43,7 +43,13 @@ "show-vrf-verbose-command", "vrf-needed-for-unbind", "show-kdump-status-command", - "show-mac-aging-time-command" + "show-mac-aging-time-command", + "config_mirror_session_add_type", + "config_static_portchannel", + "config_max_route_scale", + "sai-removes-vlan-1", + "nat-default-enabled", + "sflow-default-enabled", ] class Feature(object): @@ -94,6 +100,8 @@ def init_upstream(self): self.set_supported("crm-all-families") self.set_supported("crm-config-clear-command") self.set_supported("config-ipv6-command") + self.set_supported("show-mac-count-command") + self.set_supported("show-interfaces-counters-detailed-command") def init_broadcom(self): self.set_supported(feature_names) diff --git a/spytest/spytest/framework.py b/spytest/spytest/framework.py index 0c43748b5f2..0b30e988ba9 100644 --- a/spytest/spytest/framework.py +++ b/spytest/spytest/framework.py @@ -70,6 +70,7 @@ ("PASS", "Pass"), ("FAIL", "Fail"), ("DUTFAIL", "Dut Fail"), + ("TGENFAIL", "TGen Fail"), ("SCRIPTERROR", "Script Error"), ("UNSUPPORTED", "Not Supported"), ("CONFIGFAIL", "Config Fail"), @@ -78,7 +79,6 @@ ("SKIPPED", "Skipped"), ("TIMEOUT", "Timeout"), ("TOPOFAIL", "Topo Fail"), - ("TGENFAIL", "TGen Fail") ]) report_cols = ["Execution Started", "Execution Completed", "Execution Time", \ @@ -196,7 +196,6 @@ def __init__(self, wa, cfg): self.version_msg = "VERSION: {}".format(get_git_ver()) self.hostname = "HOSTNAME: {}".format(socket.gethostname()) self.cmdline_args = env.get("SPYTEST_CMDLINE_ARGS", "") - self.suite_args = env.get("SPYTEST_SUITE_ARGS", "") self.cmdline_args = "ARGS: {}".format(self.cmdline_args) [self.user_root, self.logs_path, self.slave_id] = _get_logs_path() if not self.user_root: @@ -214,14 +213,7 @@ def __init__(self, wa, cfg): self.execution_start_time = get_timenow() self.log.info("") self.log.info("Execution Start Time: 
{}".format(self.execution_start_time)) - self.log.info(self.version_msg) - self.log.info(self.hostname) - self.log.info(self.root_path) - self.log.info("Python: {}.{}.{}".format(sys.version_info.major, - sys.version_info.minor, sys.version_info.micro)) - self.log.info(self.cmdline_args) - if self.suite_args: - self.log.info("SUITE ARGS: {}".format(self.suite_args)) + self.log_verion_info() self.log.info("LOGS PATH: {}".format(self.logs_path)) if os.path.exists(os.path.join(self.logs_path, "node_used")): self.cfg.load_image = "none" @@ -316,6 +308,19 @@ def __init__(self, wa, cfg): utils.delete_file(paths.get_stats_txt(self.logs_path)) utils.delete_file(os.path.join(self.logs_path, "node_dead")) + def log_verion_info(self): + self.log.info(self.version_msg) + self.log.info(self.hostname) + self.log.info(self.root_path) + self.log.info("Python: {}.{}.{}".format(sys.version_info.major, + sys.version_info.minor, sys.version_info.micro)) + self.log.info(self.cmdline_args) + suite_args = env.get("SPYTEST_SUITE_ARGS", "") + if suite_args: + self.log.info("SUITE ARGS: {}".format(suite_args)) + if self.topo_str: + self.log.info(self.topo_str) + def generate_defaults_report(self): rows = [] for name, value in cmdargs.get_default_all(): @@ -811,6 +816,10 @@ def wait(self, val, msg=None): self.net.wait(val) def tg_wait(self, val, msg=None): + if self.is_soft_tgen(): + multiplier = env.get("SPYTEST_SOFT_TGEN_WAIT_MULTIPLIER", "2") + multiplier = utils.integer_parse(multiplier, 2) + val = val * multiplier if msg: self.log("TG Sleep for {} sec(s)...{}".format(val, msg)) else: @@ -831,6 +840,12 @@ def report_tc_fail(self, tcid, msgid, *args): def report_tc_unsupported(self, tcid, msgid, *args): self._context.report_tc(tcid, "Unsupported", msgid, *args) + def report_msg(self, msgid, *args): + msg, _ = self._context.result.msg(msgid, *args) + line = utils.get_line_number(2) + self._context.log.info("RMSG: {} @line {}".format(msg, line)) + return msg + def report_pass(self, msgid, 
*args): """ Infrastructure API used by test scripts to report pass @@ -2053,8 +2068,8 @@ def _post_module_prolog(self, name, success_status): self._foreach_dev(self._apis_instrument_dut, "post-module-prolog") def _post_module_epilog(self, name, success_status): - if not self.abort_module_msg: - self._create_module_logs("post-module-epilog", name) + #if self.abort_module_msg: return + self._create_module_logs("post-module-epilog", name) def _pre_module_init(self, name): if not self.abort_module_msg: @@ -2241,10 +2256,7 @@ def module_log_init(self, module_name): self._context.log.module_log_init(module_name) if module_name: #self.log("Execution Start Time: {}".format(self._context.execution_start_time)) - self.log(self._context.version_msg) - self.log(self._context.root_path) - self.log(self._context.cmdline_args) - self.log(self._context.topo_str) + self._context.log_verion_info() if self.modules_completed: self.log("Previous Module: {}".format(self.modules_completed[-1])) @@ -3848,6 +3860,8 @@ def _parse_suite_files(suites, fin, fex, tin, tex): fex.append(line[6:].strip()) elif line.startswith("-test:"): tex.append(line[6:].strip()) + elif line.startswith("+test:"): + tin.append(line[6:].strip()) elif line.startswith("+args:"): args.extend(line[6:].strip().split()) if csuites: @@ -3860,6 +3874,9 @@ def parse_suite_files(suites): opts.extend(["--ignore", f]) for t in tin: if t not in tex: + if fin: + # This is applicable only when files are not specified + continue opts.extend(["--tclist-csv", t]) for t in tex: opts.extend(["--tclist-csv-exclude", t]) @@ -4068,6 +4085,23 @@ def modify_tests(config, items): exclude_test_names.extend(_build_tclist_csv(config, "--tclist-csv-exclude") or []) _add_repeat_tests(exclude_test_names, items) + ############################################################################ + # exclude the test function is not present in TEST PATHS + ############################################################################ + testpaths = [] + 
for testpath in env.get("SPYTEST_TEST_PATHS", "").split(","): + testpaths.append(os.path.abspath(testpath) + "/") + if testpaths: + for item in items: + exclude, fspath = False, str(item.fspath) + for testpath in testpaths: + if fspath.startswith(testpath): + exclude = True + break + if not exclude: + exclude_test_names.append(item.name) + ############################################################################ + if test_names or exclude_test_names: selected, deselected = _build_selected_tests(items, test_names, exclude_test_names) items[:] = selected @@ -4775,6 +4809,8 @@ def generate_module_report(results_csv, tcresults_csv, offset=0): def init_module(name, fallback, ro_global): module = OrderedDict() module["Pass Rate"] = 0 + module["TC Count"] = tc_all.get(name, 0) + module["TC Pass"] = tc_pass.get(name, 0) module["Sys Logs"] = 0 module["CDT"] = 0 module["FCLI"] = 0 @@ -4793,8 +4829,6 @@ def init_module(name, fallback, ro_global): if res: module[res] = 0 if offset: module["Node"] = "" - module["TC Count"] = tc_all.get(name, 0) - module["TC Pass"] = tc_pass.get(name, 0) return module for row in rows: diff --git a/spytest/spytest/gnmi/translator.py b/spytest/spytest/gnmi/translator.py index 45e36f3a4ca..619382cbec6 100644 --- a/spytest/spytest/gnmi/translator.py +++ b/spytest/spytest/gnmi/translator.py @@ -28,11 +28,9 @@ def unescape(txt=''): tmp = '' while txt and txt != tmp: tmp = str(txt) - try: - txt = unquote(tmp, encoding='utf-8', errors='replace').replace(u"\u200b", '') - except Exception: - txt = unquote(tmp) - return txt + try: txt = unquote(tmp) + except Exception: pass + return txt.replace(u"\u200b", '') def escapeKeyValue(val): return str(val).replace("\\", "\\\\").replace("]", "\\]") @@ -63,7 +61,7 @@ def getAttrValLists(path, var): else: vals[i] = '' return attrs, vals - + def toRest(path='', var={}, method='get', json=None): ''' Covert template path to Rest path ''' method = toMethod(method) @@ -89,7 +87,8 @@ def toGNMI(path='', var={}, 
action='get', data=None): if (action.lower() == 'create'): action = 'UPDATE' if isinstance(body, dict): - tk = list(filter(lambda t: t and len(t), path.split('/')))[-1] + tk = [t for t in path.split('/') if t and len(t)] + tk = tk[-1] if tk else "" if tk: m = re.search(r'^\s*(\S+)=\{', tk) if m and m.group(1): diff --git a/spytest/spytest/infra.py b/spytest/spytest/infra.py index d48365867c3..b200b0b52c9 100644 --- a/spytest/spytest/infra.py +++ b/spytest/spytest/infra.py @@ -37,6 +37,9 @@ def report_tc_fail(tcid, msgid, *args): def report_tc_unsupported(tcid, msgid, *args): getwa().report_tc_unsupported(tcid, msgid, *args) +def report_msg(msgid, *args): + return getwa().report_msg(msgid, *args) + def report_pass(msgid, *args): """ Infrastructure API used by test scripts to report pass diff --git a/spytest/spytest/main.py b/spytest/spytest/main.py index ad0f1b5195f..e235b91e888 100644 --- a/spytest/spytest/main.py +++ b/spytest/spytest/main.py @@ -32,6 +32,8 @@ def _parse_args(pre_parse=False): help="testbed file path -- default: ./testbed.yaml") parser.add_argument("--test-suite", action="append", default=[], help="test suites") + parser.add_argument("--test-paths", action="append", + default=[], help="test paths") parser.add_argument("--tclist-file", action="append", default=None, help="test case list file path") parser.add_argument("--logs-path", action="store", @@ -81,14 +83,14 @@ def _parse_args(pre_parse=False): # update sys.argv with arguments from suite args if args.test_suite: addl_args = parse_suite_files(args.test_suite) - print("\nSuite Arguments {}\n".format(" ".join(addl_args))) index = sys.argv.index("--test-suite") new_argv = [] new_argv.extend(sys.argv[:index]) new_argv.extend(addl_args) new_argv.extend(sys.argv[index+2:]) sys.argv = new_argv - #print("\nSuite Arguments {}\n".format(" ".join(sys.argv))) + print("\nSuite Arguments {}\n".format(" ".join(addl_args))) + print("\nExpanded Arguments {}\n".format(" ".join(sys.argv))) 
os.environ["SPYTEST_SUITE_ARGS"] = " ".join(addl_args) return _parse_args(pre_parse=pre_parse) @@ -148,6 +150,9 @@ def _parse_args(pre_parse=False): if args.open_config_api: os.environ["SPYTEST_OPENCONFIG_API"] = args.open_config_api + if args.test_paths: + os.environ["SPYTEST_TEST_PATHS"] = ",".join(args.test_paths) + prefix="" prefix="results" if args.results_prefix: diff --git a/spytest/spytest/net.py b/spytest/spytest/net.py index 59c8b14e011..aba707a7320 100644 --- a/spytest/spytest/net.py +++ b/spytest/spytest/net.py @@ -440,15 +440,17 @@ def _find_prompt_old(self, access, net_connect=None, count=15, sleep=2, line=Non time.sleep(wait_time) if i % 2 == 0: - msg = "Trying CTRL+C: attempt {}..".format(i) - self.dut_log(device, msg, lvl=logging.WARNING) - try: hndl.send_command_timing("\x03") - except Exception: self.dut_log(device, "Failed to send CTRL+C", lvl=logging.ERROR) + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + msg = "Trying CTRL+C: attempt {}..".format(i) + self.dut_log(device, msg, lvl=logging.WARNING) + try: hndl.send_command_timing("\x03") + except Exception: self.dut_log(device, "Failed to send CTRL+C", lvl=logging.ERROR) elif i % 2 == 1: - msg = "Trying CTRL+Q: attempt {}..".format(i) - self.dut_log(device, msg, lvl=logging.WARNING) - try: hndl.send_command_timing("\x11") - except Exception: self.dut_log(device, "Failed to send CTRL+Q", lvl=logging.ERROR) + if env.get("SPYTEST_RECOVERY_CTRL_Q", "1") == "1": + msg = "Trying CTRL+Q: attempt {}..".format(i) + self.dut_log(device, msg, lvl=logging.WARNING) + try: hndl.send_command_timing("\x11") + except Exception: self.dut_log(device, "Failed to send CTRL+Q", lvl=logging.ERROR) # dump sysrq traces if hndl and env.get("SPYTEST_SYSRQ_ENABLE", "0") != "0": @@ -705,7 +707,8 @@ def _disconnect_device(self, devname): hndl = self._get_handle(devname) if hndl: try: - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl.send_command_timing("\x03") except 
Exception: pass hndl.disconnect() @@ -1523,14 +1526,15 @@ def _try(self, access, line, new_line, fcli, cmd, expect_string, delay_factor, * tmp_trace_dump = self._trace_received(devname, "", hndl, None, None, line2) trace_dump.append(tmp_trace_dump) try: - msg = "Trying CTRL+C: attempt {}..".format(attempt) - self.dut_log(devname, msg, lvl=logging.WARNING) - hndl.send_command_timing("\x03") - hndl.clear_buffer() - ctrl_c_used = True - # TODO: Need to check for both testcase and module on result setting. - #if self.tc_start_time: - # self.wa.report_scripterror("command_failed_recovered_using_ctrlc", cmd) + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + msg = "Trying CTRL+C: attempt {}..".format(attempt) + self.dut_log(devname, msg, lvl=logging.WARNING) + hndl.send_command_timing("\x03") + hndl.clear_buffer() + ctrl_c_used = True + # TODO: Need to check for both testcase and module on result setting. + #if self.tc_start_time: + # self.wa.report_scripterror("command_failed_recovered_using_ctrlc", cmd) except Exception as ex2: if self.wa.is_shutting_down(): self.dut_log(devname, "run shutting down", lvl=logging.WARNING) @@ -1725,9 +1729,11 @@ def _exit_docker(self, devname, prompt=None): pass else: try: - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl.send_command_timing("\x03") hndl.send_command_timing("end") - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl.send_command_timing("\x03") hndl.send_command_timing("exit") new_prompt = self._find_prompt(access) except Exception: @@ -2696,6 +2702,29 @@ def wait_onie_or_login(self, devname): self.wait(1) return 0 + def update_onie_grub_config(self, devname, mode): + + # Grub commands for image download. + cmds, errs = self.wa.hooks.get_onie_grub_config(devname, mode) + upgrade_image_cmd = ";".join(cmds) + + # Issue the grub commands. 
+ skip_error_check = False if self.wa.session_init_completed else True + self.trace_callback_set(devname, True) + cli_prompt = self._get_cli_prompt(devname) + access = self._get_dev_access(devname) + output = self._send_command(access, upgrade_image_cmd, cli_prompt, + skip_error_check, 18) + self.trace_callback_set(devname, False) + + for err_pattern in errs: + if err_pattern in output: + msg = "ONIE GRUB config failed matching error '{}' in mode {}".format(err_pattern, mode) + self.dut_log(devname, msg, logging.ERROR) + return msg + + return output + def upgrade_onie_image1(self, devname, url, max_ready_wait=0): devname = self._check_devname(devname) access = self._get_dev_access(devname) @@ -2705,19 +2734,7 @@ def upgrade_onie_image1(self, devname, url, max_ready_wait=0): self.dut_log(devname, "Image already upgraded during the time of DUT connect using ONIE process.") return True - upgrade_image_cmd = ";".join(""" - sudo apt-get -f install -y grub-common - sudo mkdir -p /mnt/onie-boot/ - sudo mount /dev/sda2 /mnt/onie-boot/ - sudo /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o rescue - sudo grub-editenv /mnt/onie-boot/grub/grubenv set diag_mode=none - sudo grub-editenv /mnt/onie-boot/grub/grubenv set onie_mode=rescue - sudo grub-editenv /host/grub/grubenv set next_entry=ONIE - sudo grub-reboot --boot-directory=/host/ ONIE - sudo umount /mnt/onie-boot/ - """.strip().splitlines()) - - self.dut_log(devname, "Upgrading image from onie '{}'.".format(url)) + self.dut_log(devname, "Upgrading image from onie1 '{}'.".format(url)) if access["filemode"]: return None @@ -2725,22 +2742,8 @@ def upgrade_onie_image1(self, devname, url, max_ready_wait=0): # ensure we are in sonic mode self._enter_linux_exit_vtysh(devname) - # Issue upgrade command. 
- skip_error_check = False if self.wa.session_init_completed else True - self.trace_callback_set(devname, True) - cli_prompt = self._get_cli_prompt(devname) - output = self._send_command(access, upgrade_image_cmd, cli_prompt, skip_error_check, 18) - self.trace_callback_set(devname, False) - - grub_err_patterns = ["/dev/sda2 does not exist", - "/mnt/onie-boot/onie/tools/bin/onie-boot-mode: command not found", - "No such file or directory", - "/mnt/onie-boot/: not mounted"] - for err_pattern in grub_err_patterns: - if err_pattern in output: - msg = "GRUB ONIE setting failed. Observed error pattern '{}' while setting grub-editenv.".format(err_pattern) - self.dut_log(devname, msg, logging.ERROR) - raise ValueError(msg) + # update ONIE configuration + self.update_onie_grub_config(devname, "rescue") # we need to download the helper files again self.skip_trans_helper[devname] = dict() @@ -2852,36 +2855,8 @@ def _upgrade_onie_image2(self, devname, url, max_ready_wait=0): msg = msg.format(dut_image_location) self.dut_log(devname, msg, logging.WARNING) - # Grub commands for image download. - upgrade_image_cmd = ";".join(""" - sudo apt-get -f install -y grub-common - sudo mkdir -p /mnt/onie-boot/ - sudo mount /dev/sda2 /mnt/onie-boot/ - sudo /mnt/onie-boot/onie/tools/bin/onie-boot-mode -o install - sudo grub-editenv /mnt/onie-boot/grub/grubenv set diag_mode=none - sudo grub-editenv /mnt/onie-boot/grub/grubenv set onie_mode=install - sudo grub-editenv /host/grub/grubenv set next_entry=ONIE - sudo grub-reboot --boot-directory=/host/ ONIE - sudo umount /mnt/onie-boot/ - """.strip().splitlines()) - - # Issue the grub commands. 
- skip_error_check = False if self.wa.session_init_completed else True - self.trace_callback_set(devname, True) - cli_prompt = self._get_cli_prompt(devname) - output = self._send_command(access, upgrade_image_cmd, cli_prompt, - skip_error_check, 18) - self.trace_callback_set(devname, False) - - grub_err_patterns = ["/dev/sda2 does not exist", - "/mnt/onie-boot/onie/tools/bin/onie-boot-mode: command not found", - "No such file or directory", - "/mnt/onie-boot/: not mounted"] - for err_pattern in grub_err_patterns: - if err_pattern in output: - msg = "GRUB ONIE setting failed. Observed error pattern '{}' while setting grub-editenv.".format(err_pattern) - self.dut_log(devname, msg, logging.ERROR) - return msg + # update ONIE configuration + self.update_onie_grub_config(devname, "install") # we need to download the helper files again self.skip_trans_helper[devname] = dict() @@ -3053,6 +3028,7 @@ def _reboot(self, devname, method="normal", skip_port_wait=False, self.wa.instrument(devname, "pre-reboot") self.trace_callback_set(devname, True) reboot_delay_factor = 10 + cmd_timetaken = None if not self._is_console_connection(devname): reboot_static_wait = 120 if onie: @@ -3074,7 +3050,7 @@ def _reboot(self, devname, method="normal", skip_port_wait=False, break time_left = time_left - 2 if time_left == 0: - msg = "Dut IP '{}' is not reachable even after pinging for '{}' secs after reboot on SSH" + msg = "DUT IP '{}' is not reachable even after pinging for '{}' secs after reboot on SSH" msg = msg.format(dut_mgmt_ip, reboot_polling_time) self.dut_log(devname, msg, logging.ERROR) return False @@ -3098,7 +3074,15 @@ def _reboot(self, devname, method="normal", skip_port_wait=False, else: if internal: expect = "|".join([user_mode, regex_login, regex_login_anywhere]) + cmd_starttime = get_timenow() output = self._send_command(access, reboot_cmd, expect, True, reboot_delay_factor) + cmd_timetaken = get_elapsed(cmd_starttime, False) + if cmd_timetaken < 100: + msg = "Not expecting 
the command to finish in '{}' secs." + msg = msg.format(cmd_timetaken) + msg = msg + " So waiting for a static period of 10 mins." + self.dut_log(devname, msg, lvl=logging.WARNING) + self.wait(600) else: output = self.wa.hooks.dut_reboot(devname, method=method) self.trace_callback_set(devname, False) @@ -3435,10 +3419,10 @@ def _add_config_method(self, devname, args_str): return args_str + " --load-config-method {}".format(load_config_method) def _add_core_dump_flags(self, args_str, value_list): - core_flag = value_list.pop(0) - dump_flag = value_list.pop(0) - clear_flag = value_list.pop(0) - misc_flag = value_list.pop(0) + core_flag = value_list[0] + dump_flag = value_list[1] + clear_flag = value_list[2] + misc_flag = value_list[3] core_flag = "YES" if core_flag else "NO" dump_flag = "YES" if dump_flag else "NO" clear_flag = "YES" if clear_flag else "NO" @@ -3511,7 +3495,8 @@ def _init_clean(self, devname, core, dump, misc=False): largs = [core, dump, self.cfg.clear_tech_support, misc] self._apply_remote(devname, "init-clean", largs) - def _apply_remote(self, devname, option_type, value_list=[]): + def _apply_remote(self, devname, option_type, value_list=None): + value_list = value_list or [] devname = self._check_devname(devname) access = self._get_dev_access(devname) @@ -3547,8 +3532,8 @@ def _apply_remote(self, devname, option_type, value_list=[]): execute_in_console = True # transfer the cfg files dst_file_list = [] - method = value_list.pop(0) - for name in value_list: + method = value_list[0] + for name in value_list[1:]: for src_file in utils.make_list(name): dst_file = self._upload_file2(devname, access, src_file) if dst_file: dst_file_list.append(dst_file) @@ -3557,13 +3542,13 @@ def _apply_remote(self, devname, option_type, value_list=[]): self.dut_log(devname, "Applying config files remotely '{}'".format(args_str)) skip_error_check = True elif option_type == "run-test": - timeout = value_list.pop(0) - args_str = " ".join(value_list) + timeout = 
value_list[0] + args_str = " ".join(value_list[1:]) delay_factor = int(math.ceil((timeout * 1.0) / 100)) elif option_type == "init-ta-config": execute_in_console = True - profile_name = value_list.pop(-1).lower() - args_str = self._add_core_dump_flags(args_str, value_list) + profile_name = value_list[-1].lower() + args_str = self._add_core_dump_flags(args_str, value_list[:-1]) args_str = args_str + " --config-profile {}".format(profile_name) if env.get("SPYTEST_NTP_CONFIG_INIT", "0") != "0": args_str = args_str + " --env SPYTEST_NTP_CONFIG_INIT 1" @@ -3623,8 +3608,8 @@ def _apply_remote(self, devname, option_type, value_list=[]): args_str = self._add_core_dump_flags(args_str, value_list) elif option_type == "update-reserved-ports": live_tracing = False - port_list = value_list.pop(0) - args_str = ' '.join(port_list) + port_list = value_list[0] + args_str = ' '.join(port_list[1:]) elif option_type == "port-defaults": execute_in_console = True args_str = "" @@ -3645,7 +3630,7 @@ def _apply_remote(self, devname, option_type, value_list=[]): elif option_type == "wait-for-ports": args_str = value_list[0] elif option_type == "config-profile": - args_str = value_list.pop(0).lower() + args_str = value_list[0].lower() #execute_in_console = bool(args_str != "na") execute_in_console = True elif option_type == "dump-click-cmds": @@ -3919,6 +3904,8 @@ def _apply_remote(self, devname, option_type, value_list=[]): return True def generate_tech_support(self, devname, name): + if self.wa.has_get_tech_support("none"): + return for _ in range(2): try: self._apply_remote(devname, "get-tech-support", [name]) @@ -5085,7 +5072,7 @@ def _send_cmd_expect_reboot(self, devname, cmd, expected_prompt, opts, new_line= msg = "Identified the same/user prompt '{0}' or '{1}' with in '{2}' secs." msg = msg.format(expected_prompt, user_mode, cmd_timetaken) msg = msg + " So waiting for a static period of 5 mins." 
- self.dut_log(devname, msg) + self.dut_log(devname, msg, lvl=logging.WARNING) self.wait(300) prompt = self._find_prompt(access) if prompt != expected_prompt: @@ -5358,8 +5345,9 @@ def _execute_uicli_step(self, uicli, devname, stepentry, error_patterns, ignore_ skip_error_check=True, conf=is_step_conf, confirm=confirm_command) if not isinstance(output, list) and re.search("Syntax error:", output): - hndl = self._get_handle(devname) - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl = self._get_handle(devname) + hndl.send_command_timing("\x03") hndl_after_cmd = self._get_handle(devname) @@ -5487,8 +5475,9 @@ def _execute_uicli_step(self, uicli, devname, stepentry, error_patterns, ignore_ skip_error_check=True, conf=is_step_conf, confirm=confirm_command) if not isinstance(output, list) and re.search("Syntax error:", output): - hndl = self._get_handle(devname) - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl = self._get_handle(devname) + hndl.send_command_timing("\x03") hndl_after_cmd = self._get_handle(devname) @@ -5621,8 +5610,9 @@ def _execute_uicli_step(self, uicli, devname, stepentry, error_patterns, ignore_ skip_error_check=True, conf=is_step_conf, confirm=confirm_command) if not isinstance(output, list) and re.search("Syntax error:", output): - hndl = self._get_handle(devname) - hndl.send_command_timing("\x03") + if env.get("SPYTEST_RECOVERY_CTRL_C", "1") == "1": + hndl = self._get_handle(devname) + hndl.send_command_timing("\x03") output = nl.join(output.split(nl)[:-1]) diff --git a/spytest/spytest/remote/service-udp.py b/spytest/spytest/remote/service-udp.py index 38f9fde4ac6..24eac9bcc3d 100644 --- a/spytest/spytest/remote/service-udp.py +++ b/spytest/spytest/remote/service-udp.py @@ -17,6 +17,7 @@ def eprint(*args, **kwargs): def udp_server(host='0.0.0.0', port=1234): port = int(os.getenv("udp_server_port", port)) + host = os.getenv("udp_server_host", host) s = 
socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind((host, port)) eprint("Listening on udp %s:%s" % (host, port)) diff --git a/spytest/spytest/remote/spytest-helper.py b/spytest/spytest/remote/spytest-helper.py index c7fb7abb75f..60daf0615c8 100644 --- a/spytest/spytest/remote/spytest-helper.py +++ b/spytest/spytest/remote/spytest-helper.py @@ -30,6 +30,7 @@ tmp_frr_file = "/tmp/frr.conf" rsyslog_conf_file = "/etc/rsyslog.d/99-default.conf" tmp_rsyslog_conf_file = "/tmp/rsyslog-default.conf" +pim_config_file = "/etc/sonic/frr/pimd.conf" var_log_dir = "/var/log" spytest_dir = "/etc/spytest" @@ -321,7 +322,9 @@ def apply_file(filepath, method): if not os.path.exists(old_file + ".orig"): commands_to_execute.append("cp {0} {0}.orig".format(old_file)) commands_to_execute.append("cp {} {}".format(filepath, old_file)) - + elif filepath.endswith('.pim'): + backup_file(pim_config_file) + commands_to_execute.append("cp {} {}".format(filepath, pim_config_file)) if commands_to_execute: execute_cmds(commands_to_execute) else: diff --git a/spytest/spytest/rest.py b/spytest/spytest/rest.py index cebbd03c610..7e7402694f6 100644 --- a/spytest/spytest/rest.py +++ b/spytest/spytest/rest.py @@ -83,7 +83,7 @@ def _create_session(self, username=None, password=None): self.session.verify = False else: if self.curr_pwd: - self.session.auth = (self.username, self.curr_pwd) + self.session.auth = (self.username, self.curr_pwd ) self.session.verify = False warnings.filterwarnings('ignore', message='Unverified HTTPS request') @@ -110,7 +110,6 @@ def _get_url(self, path, *args, **kwargs): for key, value in kwargs.items(): if value: value = value.replace(" ", "%20") - value = value.replace("/", "%2F") params.append('{}={}'.format(key, value)) else: params.append(key) @@ -299,7 +298,8 @@ def apply(self, request, sections=None, operations=None, ui="rest"): def send(self, devname, method='get', api='', headers={}, params=None, data=None, verify=False, retAs='json', **kwargs): - session = 
self._get_session() + credentials = self._get_credentials() + session = self._get_session(username = credentials[0], password=credentials[1] or self.password or self.altpassword) url = '{}{}'.format(self.base_url,api) self._log("REST [{}]: {}".format(method.upper(), url)) diff --git a/spytest/spytest/result.py b/spytest/spytest/result.py index f3d174072f2..afa3d784693 100644 --- a/spytest/spytest/result.py +++ b/spytest/spytest/result.py @@ -114,30 +114,31 @@ def build_msg(self, name, *args): def set_default_error(self, res, code, *args): self.default_result = res - try: - if code: - self.default_desc = self.build_msg(code, *args) - else: - self.default_desc = None - except Exception as e: - print(e) - self.default_desc = "Invalid error code {} : {}".format(code, e) - self.default_result = "Fail" + if not code: + self.default_desc = None + else: + self.default_desc, msg_ok = self.msg(code, *args) + if not msg_ok: self.default_result = "Fail" return self.default_desc def set(self, res, code, *args): if self.result and self.result != "Pass": msg = "result already set to {} -- ignoring".format(self.result) - print(msg) return msg self.result = res + self.desc, msg_ok = self.msg(code, *args) + if not msg_ok: self.result = "Fail" + return self.desc + + def msg(self, code, *args): try: - self.desc = self.build_msg(code, *args) + desc = self.build_msg(code, *args) + msg_ok = True except Exception as e: print(e) - self.desc = "Invalid error code {} : {}".format(code, e) - self.result = "Fail" - return self.desc + desc = "Invalid error code {} : {}".format(code, e) + msg_ok = False + return desc, msg_ok def _build_record(self, nodeid, func, tcid, time_taken, comp, result=None, desc=None, rtype="Executed", index=0, syslog_count=0, diff --git a/spytest/spytest/rps.py b/spytest/spytest/rps.py index edac9890cbf..942e9febb1a 100644 --- a/spytest/spytest/rps.py +++ b/spytest/spytest/rps.py @@ -135,7 +135,9 @@ def merge_dict(self, d1, d2): retval.update(d2) return retval - def 
_rest_send(self, method=None, url=None, params={}, data=None, auth=None, headers={}): + def _rest_send(self, method=None, url=None, params=None, data=None, auth=None, headers=None): + params = params or {} + headers = headers or {} method = method or self.rest_method url = url or self.rest_url auth = auth or (requests.auth.HTTPBasicAuth(self.username, self.password) diff --git a/spytest/spytest/tgen/scapy/driver.py b/spytest/spytest/tgen/scapy/driver.py index 6971afd2bd6..b645cc94585 100755 --- a/spytest/spytest/tgen/scapy/driver.py +++ b/spytest/spytest/tgen/scapy/driver.py @@ -23,6 +23,7 @@ def __init__(self, port, dry=False, dbg=0, logger=None): self.port = port self.dry = dry self.dbg = dbg + self.errs = [] self.finished = False self.logger = logger or Logger() self.utils = Utils(self.dry, logger=self.logger) @@ -39,6 +40,13 @@ def __del__(self): self.cleanup() del self.packet + def get_alerts(self): + errs = [] + errs.extend(self.errs) + errs.extend(self.packet.get_alerts()) + self.errs = [] + return errs + def cleanup(self): print("ScapyDriver {} cleanup...".format(self.iface)) self.finished = True @@ -400,6 +408,9 @@ def send_arp(self, intf, index=0): def apply_bgp(self, op, enable, intf): return self.packet.apply_bgp(op, enable, intf) + def apply_bgp_route(self, enable, route): + return self.packet.apply_bgp_route(enable, route) + def config_igmp(self, mode, intf, host): return self.packet.config_igmp(mode, intf, host) diff --git a/spytest/spytest/tgen/scapy/logger.py b/spytest/spytest/tgen/scapy/logger.py index e5038bfd319..085a925af22 100755 --- a/spytest/spytest/tgen/scapy/logger.py +++ b/spytest/spytest/tgen/scapy/logger.py @@ -4,6 +4,7 @@ import logging import datetime import traceback +import threading from lock import Lock @@ -22,6 +23,20 @@ def get_log_lvl_name(lvl): return lvl_map[lvl] return lvl +def get_thread_name(): + name = threading.current_thread().name + name = name.replace("MainThread", "Thread-0") + try: + num = 
int(name.replace("Thread-", "")) + name = "T%04d: " % (num) + except Exception: pass + try: + num = int(name.replace("Pyro-Worker-", "")) + name = "P%04d: " % (num) + name = "T%04d: " % (0) + except Exception: pass + return name + class LogFormatter(object): def __init__(self): @@ -32,24 +47,43 @@ def format(self, record): elapsed_seconds = record.created - self.start_time elapsed = datetime.timedelta(seconds=elapsed_seconds) time_stamp = time_delta(elapsed) + thid = get_thread_name() lvl = get_log_lvl_name(record.levelname) try: msg = record.getMessage() except Exception: msg = "Exception getting message from record" if self.node_name: - return "{} {} {} {}".format(time_stamp, self.node_name, lvl, msg) + prefix = "{} {} {}{}".format(time_stamp, self.node_name, thid, lvl) else: - return "{} {} {}".format(time_stamp, lvl, msg) + prefix = "{} {}{}".format(time_stamp, thid, lvl) + return "{} {}".format(prefix, msg) class Logger(object): def __init__(self, dry=False, name="scapy-tgen", logs_dir = None): self.dry = dry self.dbg = 1 self.log_file = None + self.node_name = "" self.lock = Lock() self.fmt = LogFormatter() self.logger = logging.getLogger(name) + + if logs_dir: + self.logs_dir = logs_dir + else: + self.logs_dir = os.getenv("SCAPY_TGEN_LOGS_PATH", "server") + self.logs_dir2 = self.logs_dir + + if not self.dry: + self.remove_stdout() + #self.create_combined_log() + + self.log_file = None + self.file_handler = None + self.set_log_file(None) + + def remove_stdout(self): stdlog = logging.StreamHandler(sys.stdout) stdlog.setLevel(logging.ERROR if not self.dry else logging.DEBUG) ch = logging.StreamHandler() @@ -58,33 +92,38 @@ def __init__(self, dry=False, name="scapy-tgen", logs_dir = None): self.logger.addHandler(ch) self.logger.removeHandler(stdlog) self.logger.propagate = False - self.log_file = None - self.file_handler = None - if logs_dir: - self.logs_dir = logs_dir - else: - self.logs_dir = os.getenv("SCAPY_TGEN_LOGS_PATH", "server") - 
self.set_log_file(None) - - @staticmethod - def ensure_parent(filename): - path = os.path.dirname(filename) - path = os.path.abspath(path) - if not os.path.exists(path): - os.makedirs(path) - def set_node_name(self, name): + def ensure_parent(self, log_file): + from utils import Utils + Utils.ensure_parent(log_file) + + def create_combined_log(self): + log_file = os.path.join(self.logs_dir2, "all.log") + self.ensure_parent(log_file) + self.file_handler = logging.FileHandler(log_file) + self.file_handler.setFormatter(self.fmt) + self.file_handler.setLevel(logging.DEBUG) + self.logger.addHandler(self.file_handler) + + def set_node_name(self, name, msg=""): + self.banner("set_node_name: {} current '{}' new '{}'".format(msg, self.node_name, name)) + self.node_name = name if self.fmt: self.fmt.node_name = name + self.logs_dir2 = os.path.join(self.logs_dir, name) def set_log_file(self, log_file): + + if self.log_file: + self.banner("Close file {}".format(self.log_file)) + if self.file_handler: self.logger.removeHandler(self.file_handler) self.file_handler = None if log_file: - if self.logs_dir: - log_file = os.path.join(self.logs_dir, log_file) + if self.logs_dir2: + log_file = os.path.join(self.logs_dir2, log_file) self.ensure_parent(log_file) self.file_handler = logging.FileHandler(log_file) self.file_handler.setFormatter(self.fmt) @@ -93,53 +132,73 @@ def set_log_file(self, log_file): self.log_file = log_file + if self.log_file: + self.banner("Open file {}".format(self.log_file)) + + return self.log_file + + def get_log(self, filename=None): if not filename: - filename = self.log_file + filename2 = self.log_file + elif not os.path.exists(filename) and self.logs_dir2: + filename2 = os.path.join(self.logs_dir2, filename) elif not os.path.exists(filename) and self.logs_dir: - filename = os.path.join(self.logs_dir, filename) + filename2 = os.path.join(self.logs_dir, filename) + else: + filename2 = None + + if not filename2: + return self.error("Failed to find log file 
{}".format(filename)) - if not filename: - return "" try: - fh = open(filename, 'r') + fh = open(filename2, 'r') data = fh.readlines() fh.close() data = map(str.strip, data) return "\n".join(data) except Exception: - return "" + return self.error("Failed to read log file {}".format(filename2)) def todo(self, etype, name, value): msg = "{}: {} = {}".format(etype, name, value) self.error(msg) raise ValueError(msg) - def debug(self, *args, **kwargs): - msg = " ".join(map(str,args)) - self._log(logging.DEBUG, msg) - def _log(self, lvl, msg): if not msg.strip(): return self.lock.acquire() for line in msg.split("\n"): self.logger.log(lvl, line) self.lock.release() + return msg def log(self, *args, **kwargs): msg = " ".join(map(str,args)) - self._log(logging.INFO, msg) + return self._log(logging.INFO, msg) def info(self, *args, **kwargs): msg = " ".join(map(str,args)) - self._log(logging.INFO, msg) + return self._log(logging.INFO, msg) + + def warning(self, *args, **kwargs): + msg = ["#"*80, " ".join(map(str,args)), "#"*80] + return self._log(logging.WARNING, "\n".join(msg)) + + def debug(self, *args, **kwargs): + msg = " ".join(map(str,args)) + return self._log(logging.DEBUG, msg) + + def banner(self, *args, **kwargs): + msg = ["#"*80, " ".join(map(str,args)), "#"*80] + return self.info("\n".join(msg)) def error(self, *args, **kwargs): msg = "" #msg = msg + "=================================== " msg = msg + " ".join(map(str,args)) #msg = msg + "=================================== " - self._log(logging.ERROR, msg) + return self._log(logging.ERROR, msg) def log_exception(self, e, msg): self.logger.error("=========== exception ==================") diff --git a/spytest/spytest/tgen/scapy/packet.py b/spytest/spytest/tgen/scapy/packet.py index 240b50750c7..8a9d500740d 100755 --- a/spytest/spytest/tgen/scapy/packet.py +++ b/spytest/spytest/tgen/scapy/packet.py @@ -44,6 +44,7 @@ # "circuit_endpoint_type", + "enable_stream", "enable_stream_only_gen", # @@ -123,16 +124,17 @@ class 
ScapyPacket(object): def __init__(self, iface, dbg=0, dry=False, hex=False, logger=None): self.dry = dry - self.logger = logger or Logger() + self.errs = [] + self.logger = logger or Logger(dry) try: self.logger.info("SCAPY VERSION = {}".format(Conf().version)) except Exception: self.logger.info("SCAPY VERSION = UNKNOWN") self.utils = Utils(self.dry, logger=self.logger) self.max_rate_pps = self.utils.get_env_int("SPYTEST_SCAPY_MAX_RATE_PPS", 100) self.dbg = dbg - self.show_summary = False + self.show_summary = bool(self.dbg > 2) self.hex = hex self.iface = iface - self.is_vde = iface.startswith("vde") + self.is_vde = not dry and iface.startswith("vde") self.tx_count = 0 self.rx_count = 0 self.rx_sock = None @@ -140,6 +142,7 @@ def __init__(self, iface, dbg=0, dry=False, hex=False, logger=None): self.finished = False self.exabgp_nslist = [] self.cleanup() + self.mtu = 9194 if iface and not self.dry: #already init_bridge called in cleanup() #self.init_bridge(iface) @@ -151,25 +154,25 @@ def __init__(self, iface, dbg=0, dry=False, hex=False, logger=None): self.os_system("ip link set dev {0}-br up".format(iface)) self.os_system("ip link set dev {0} up".format(iface)) self.os_system("ip link set dev {0} promisc on".format(iface)) - self.os_system("ip link set dev {0} mtu 9194".format(iface)) - self.os_system("ip link set dev {0}-br mtu 9194".format(iface)) + self.os_system("ip link set dev {0} mtu {1}".format(iface, self.mtu)) + self.os_system("ip link set dev {0}-br mtu {1}".format(iface, self.mtu)) self.os_system("echo 0 > /sys/class/net/{0}-br/bridge/multicast_snooping".format(iface)) #self.os_system("ip link del {0}-rx".format(iface)) self.os_system("ip link add {0}-rx type dummy".format(iface)) - self.os_system("ip link set dev {0}-rx mtu 9194".format(iface)) + self.os_system("ip link set dev {0}-rx mtu {1}".format(iface, self.mtu)) self.configure_ipv6(iface) self.os_system("tc qdisc del dev {0} ingress".format(iface)) self.os_system("tc qdisc add dev {0} 
ingress".format(iface)) #self.os_system("tc filter del dev {0} parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev {0}-rx".format(iface)) self.os_system("tc filter add dev {0} parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev {0}-rx".format(iface)) self.os_system("ip link set {0}-rx up".format(iface)) - if self.dbg > 2: + if self.dbg > 5: self.os_system("ifconfig") self.rx_open() def os_system(self, cmd): self.logger.info("EXEC: {}".format(cmd)) - os.system(cmd) + return self.utils.exec_cmd(cmd) def init_bridge(self, iface): if iface and not self.dry: @@ -182,16 +185,32 @@ def init_bridge(self, iface): def configure_ipv6(self, iface): iface_rx = "{0}-rx".format(iface) - self.os_system("sysctl -w net.ipv6.conf.{}.forwarding=0 >/dev/null".format(iface_rx)) - self.os_system("sysctl -w net.ipv6.conf.{}.accept_ra=0 >/dev/null".format(iface_rx)) - self.os_system("sysctl -w net.ipv6.conf.{}.forwarding=1 >/dev/null".format(iface)) - #self.os_system("sysctl -w net.ipv6.conf.{}.accept_ra=1 >/dev/null".format(iface)) - self.os_system("sysctl -w net.ipv6.conf.all.forwarding=0 >/dev/null") + self.os_system("sysctl -w net.ipv6.conf.{}.forwarding=0".format(iface_rx)) + self.os_system("sysctl -w net.ipv6.conf.{}.accept_ra=0".format(iface_rx)) + self.os_system("sysctl -w net.ipv6.conf.{}.forwarding=1".format(iface)) + #self.os_system("sysctl -w net.ipv6.conf.{}.accept_ra=1".format(iface)) + self.os_system("sysctl -w net.ipv6.conf.all.forwarding=0") def __del__(self): self.logger.info("packet cleanup todo: ", self.iface) self.cleanup() + def error(self, *args, **kwargs): + msg = self.logger.error(*args, **kwargs) + self.errs.append(msg) + return msg + + def banner(self, *args, **kwargs): + msg = self.logger.banner(*args, **kwargs) + self.errs.append(msg) + return msg + + def get_alerts(self): + errs = [] + errs.extend(self.errs) + self.errs = [] + return errs + def clear_stats(self): self.tx_count = 0 self.rx_count = 0 @@ -202,8 +221,9 
@@ def close_sock(self, sock): return None def cleanup(self): + print("ScapyPacket {} cleanup...".format(self.iface)) self.logger.info("ScapyPacket {} cleanup...".format(self.iface)) - self.exabgpd_stop() + self.exabgpd_stop_all() self.finished = True self.rx_sock = self.close_sock(self.rx_sock) self.tx_sock = self.close_sock(self.tx_sock) @@ -251,17 +271,17 @@ def readp(self, iface): return packet - def sendp(self, data, iface, stream_name, left): + def sendp(self, pkt, data, iface, stream_name, left): self.tx_count = self.tx_count + 1 self.trace_stats() if self.dbg > 2 or (self.dbg > 1 and left != 0): - cmd = "" if not self.show_summary else Ether(data).command() + cmd = "" if not self.show_summary else pkt.command() msg = "sendp:{}:{} len:{} count:{} {}".format self.logger.debug(msg(iface, stream_name, len(data), self.tx_count, cmd)) if self.dbg > 2: - self.trace_packet(data, self.hex) + self.trace_packet(pkt, self.hex) if not self.dry: if not self.tx_sock: @@ -284,11 +304,11 @@ def trace_stats(self): pass def show_pkt(self, pkt): - try: - self.logger.debug(pkt.show2(dump=True)) - except Exception: - self.logger.debug(pkt.command()) - pkt.show2() + if self.dbg > 4: + return self.utils.exec_func(pkt.show2) + if self.dbg > 3: + return self.utils.exec_func(pkt.summary) + return "" def trace_packet(self, pkt, hex=True, fields=True): if not fields and not hex: return @@ -315,7 +335,7 @@ def send_packet(self, pwa, iface, stream_name, left): except Exception: crc = binascii.unhexlify('00' * 4) bstr = bytes(strpkt+crc) - self.sendp(bstr, iface, stream_name, left) + self.sendp(pwa.pkt, bstr, iface, stream_name, left) return bstr def check(self, pkt): @@ -367,7 +387,7 @@ def ensure_int(self, name, value, min_val, max_val): if value < 0 or value > 13312: msg = "invalid value {} = {} shoud be > {} and < {}" msg = msg.format(name, value, min_val, max_val) - self.logger.error(msg) + self.error(msg) def build_udp(self, kws): udp = UDP() @@ -437,12 +457,38 @@ def 
build_igmp(self, kws): def fill_emulation_params(self, stream): - # read params from emulation interfaces - if "emulation_src_handle" in stream.kws: - emulation_src_handle = stream.kws.pop("emulation_src_handle") - intf_ip_addr = emulation_src_handle.kws.get("intf_ip_addr", "0.0.0.0") - ipv6_intf_addr = emulation_src_handle.kws.get("ipv6_intf_addr", "") - count = self.utils.intval(emulation_src_handle.kws, "count", 1) + circuit_endpoint_type = stream.kws.pop("circuit_endpoint_type", None) + self.logger.debug("stream.kws-0 = {}".format(stream.kws)) + + src_info, dst_info = {}, {} + src_intf = stream.kws.pop("emulation_src_handle", None) + dst_intf = stream.kws.pop("emulation_dst_handle", None) + if src_intf: + ns = "ns_{}_{}".format(src_intf.name, 0) + src_info = Utils.get_ip_addr_dev("veth1", ns) + self.logger.debug("src_info = {} {}".format(ns, src_info)) + if circuit_endpoint_type == "ipv4": + src_info.pop("inet6", "") + elif circuit_endpoint_type == "ipv6": + src_info.pop("inet", "") + stream.kws["mac_src"] = [src_info.get("link/ether", "00:00:00:00:00:00")] + if dst_intf: + ns = "ns_{}_{}".format(dst_intf.name, 0) + dst_info = Utils.get_ip_addr_dev("veth1", ns) + self.logger.debug("dst_info = {} {}".format(ns, dst_info)) + if circuit_endpoint_type == "ipv4": + dst_info.pop("inet6", "") + elif circuit_endpoint_type == "ipv6": + dst_info.pop("inet", "") + stream.kws["mac_dst"] = [dst_info.get("link/ether", "00:00:00:00:00:00")] + + # read params from emulation interfaces + if src_intf: + intf_ip_addr = src_info.get("inet", "0.0.0.0").split("/")[0] + intf_ip_addr = src_intf.kws.get("intf_ip_addr", intf_ip_addr) + ipv6_intf_addr = src_info.get("inet6", "").split("/")[0] + ipv6_intf_addr = src_intf.kws.get("ipv6_intf_addr", ipv6_intf_addr) + count = self.utils.intval(src_intf.kws, "count", 1) if ipv6_intf_addr: stream.kws["l3_protocol"] = "ipv6" stream.kws["ipv6_src_addr"] = ipv6_intf_addr @@ -456,19 +502,20 @@ def fill_emulation_params(self, stream): if count > 1: 
stream.kws["ip_src_count"] = count stream.kws["ip_src_mode"] = "increment" - try: - stream.kws["mac_src"] = [self.get_my_mac(emulation_src_handle)] - stream.kws["mac_dst"] = [self.get_arp_mac(emulation_src_handle)] - except Exception as exp: - self.logger.info(exp) - self.logger.info(traceback.format_exc()) + #try: + #stream.kws["mac_src"] = [src_info.get("link/ether", "00:00:00:00:00:00")] + #stream.kws["mac_dst"] = [self.get_arp_mac(src_intf)] + #except Exception as exp: + #self.logger.info(exp) + #self.logger.info(traceback.format_exc()) self.logger.debug("updated stream.kws-1 = {}".format(stream.kws)) - if "emulation_dst_handle" in stream.kws: - emulation_dst_handle = stream.kws.pop("emulation_dst_handle") - intf_ip_addr = emulation_dst_handle.kws.get("intf_ip_addr", "") - ipv6_intf_addr = emulation_dst_handle.kws.get("ipv6_intf_addr", "") - count = self.utils.intval(emulation_dst_handle.kws, "count", 1) + if dst_intf: + intf_ip_addr = dst_info.get("inet", "").split("/")[0] + intf_ip_addr = dst_intf.kws.get("intf_ip_addr", intf_ip_addr) + ipv6_intf_addr = dst_info.get("inet6", "").split("/")[0] + ipv6_intf_addr = dst_intf.kws.get("ipv6_intf_addr", ipv6_intf_addr) + count = self.utils.intval(dst_intf.kws, "count", 1) if ipv6_intf_addr: stream.kws["l3_protocol"] = "ipv6" stream.kws["ipv6_dst_addr"] = ipv6_intf_addr @@ -481,6 +528,7 @@ def fill_emulation_params(self, stream): if count > 1: stream.kws["ip_dst_count"] = count stream.kws["ip_dst_mode"] = "increment" + #stream.kws["mac_dst"] = [dst_info.get("link/ether", "00:00:00:00:00:00")] self.logger.debug("updated stream.kws-2 = {}".format(stream.kws)) def build_first(self, stream): @@ -496,7 +544,11 @@ def build_first(self, stream): duration = self.pop_int(kws, "duration", 1) duration2 = self.pop_int(kws, "duration2", 0) + rate_percent = self.pop_int(kws, "rate_percent", 0) rate_pps = self.pop_int(kws, "rate_pps", 1) + if rate_percent > 0: + rate_pps = self.max_rate_pps + self.banner("rate_percent {} not 
supported using {} pps".format(rate_percent, rate_pps)) l2_encap = self.pop_str(kws, "l2_encap", "") l3_protocol = self.pop_str(kws, "l3_protocol", "") l4_protocol = self.pop_str(kws, "l4_protocol", "") @@ -522,10 +574,10 @@ def build_first(self, stream): mac_dst_mode = kws.get("mac_dst_mode", "fixed").strip() if mac_dst_mode not in ["fixed", "increment", "decrement", "list"]: - self.logger.error("unhandled option mac_dst_mode = {}".format(mac_dst_mode)) + self.error("unhandled option mac_dst_mode = {}".format(mac_dst_mode)) mac_src_mode = kws.get("mac_src_mode", "fixed").strip() if mac_src_mode not in ["fixed", "increment", "decrement", "list"]: - self.logger.error("unhandled option mac_src_mode = {}".format(mac_src_mode)) + self.error("unhandled option mac_src_mode = {}".format(mac_src_mode)) self.ensure_int("l3_length", l3_length, 44, 16365) if length_mode in ["random", "increment", "incr"]: @@ -533,10 +585,10 @@ def build_first(self, stream): self.ensure_int("frame_size_max", frame_size_max, 44, 13312) self.ensure_int("frame_size_step", frame_size_step, 0, 13312) elif length_mode != "fixed": - self.logger.error("unhandled option length_mode = {}".format(length_mode)) + self.error("unhandled option length_mode = {}".format(length_mode)) if data_pattern_mode != "fixed": - self.logger.error("unhandled option data_pattern_mode = {}".format(data_pattern_mode)) + self.error("unhandled option data_pattern_mode = {}".format(data_pattern_mode)) # update parsed values stream.kws["mac_src"] = mac_src @@ -676,7 +728,7 @@ def build_first(self, stream): # verify unhandled options for key, value in kws.items(): if key not in stale_list_ignore: - self.logger.error("unhandled option {} = {}".format(key, value)) + self.error("unhandled option {} = {}".format(key, value)) pwa = SpyTestDict() pwa.add_signature = add_signature @@ -686,7 +738,7 @@ def build_first(self, stream): pwa.pkts_per_burst = pkts_per_burst pwa.transmit_mode = transmit_mode if rate_pps > self.max_rate_pps: - 
self.logger.debug("drop the rate from {} to {}".format(rate_pps, self.max_rate_pps)) + self.error("drop the rate from {} to {}".format(rate_pps, self.max_rate_pps)) rate_pps = self.max_rate_pps pwa.rate_pps = rate_pps pwa.duration = duration @@ -1203,7 +1255,7 @@ def get_arp_mac(self, intf, default="00:00:00:00:00:00"): return re.search(r"(([a-f\d]{1,2}\:){5}[a-f\d]{1,2})", output).groups()[0] except Exception as exp: - self.logger.debug("Fail-1: get_arp_mac {} {} {}".format(ipv4gw, ipv6gw, exp)) + self.banner("Fail-1: get_arp_mac {} {} {}".format(ipv4gw, ipv6gw, exp)) # try getting from arping output try: @@ -1212,7 +1264,7 @@ def get_arp_mac(self, intf, default="00:00:00:00:00:00"): self.logger.debug("{} = \n {}".format(cmd, output)) return re.search(r"(([a-f\d]{1,2}\:){5}[a-f\d]{1,2})", output).groups()[0] except Exception as exp: - self.logger.debug("Fail-2: get_arp_mac {} {} {}".format(ipv4gw, ipv6gw, exp)) + self.banner("Fail-2: get_arp_mac {} {} {}".format(ipv4gw, ipv6gw, exp)) return default @@ -1225,7 +1277,7 @@ def ping(self, intf, ping_dst, index=0): if ip._version == 6: cmd = "ip netns exec ns_{0} ping6 -c 1 {1}".format(ns, ping_dst) except Exception as exp: - self.logger.error(exp) + self.error(exp) # execute the command return self.utils.cmdexec(cmd) @@ -1251,21 +1303,25 @@ def log_large_file(self, fname): self.logger.info("===================================") def exabgpd_file(self, ns, extn): - cwd = self.logger.logs_dir - #return "{}/logs/current/exabgpd_{}.{}".format(cwd, ns, extn) - return "{}/exabgpd_{}.{}".format(cwd, ns, extn) - - def exabgpd_stop(self, ns0=None): - nslist = self.exabgp_nslist if ns0 is None else [ns0] - for ns in nslist: - logfile = self.exabgpd_file(ns, "log") - pidfile = self.exabgpd_file(ns, "pid") - self.log_large_file(logfile) - self.logger.info(self.utils.cat_file(pidfile)) + return "{}/exabgpd_{}.{}".format(self.logger.logs_dir, ns, extn) + + def exabgpd_stop_all(self): + self.os_system("ps -ef") + for pidfile in 
self.utils.list_files(self.logger.logs_dir, "exabgpd_*.pid"): cmd = "pkill -F {}".format(pidfile) - self.utils.cmdexec(cmd) - if ns in self.exabgp_nslist: - self.exabgp_nslist.remove(ns) + print(cmd) + out = self.os_system(cmd) + print(out) + + def exabgpd_stop(self, ns): + logfile = self.exabgpd_file(ns, "log") + pidfile = self.exabgpd_file(ns, "pid") + self.log_large_file(logfile) + self.logger.info(self.utils.cat_file(pidfile)) + cmd = "pkill -F {}".format(pidfile) + self.os_system(cmd) + if ns in self.exabgp_nslist: + self.exabgp_nslist.remove(ns) return True def config_exabgp_one(self, enable, intf, index=0): @@ -1279,7 +1335,6 @@ def config_exabgp_one(self, enable, intf, index=0): pidfile = self.exabgpd_file(ns, "pid") envfile = self.exabgpd_file(ns, "env") cfgfile = self.exabgpd_file(ns, "cfg") - cmdfile = self.exabgpd_file(ns, "cmd") intf_ip_addr = intf.kws.get("intf_ip_addr", "") ipv6_intf_addr = intf.kws.get("ipv6_intf_addr", "") @@ -1289,40 +1344,13 @@ def config_exabgp_one(self, enable, intf, index=0): remote_ipv6_addr = intf.bgp_kws.get("remote_ipv6_addr", "") if remote_ipv6_addr: remote_ip_addr = remote_ipv6_addr - as_path = intf.bgp_kws.get("as_path", None) - as_seq = None - if as_path and "as_seq:" in as_path: - try: as_seq = int(as_path.replace("as_path:", "")) - except Exception: as_seq = None - remote_as = self.utils.intval(intf.bgp_kws, "remote_as", 65001) local_as = self.utils.intval(intf.bgp_kws, "local_as", 65007) + #enable_4_byte_as = self.utils.intval(intf.bgp_kws, "enable_4_byte_as", 0) #ip_version = self.utils.intval(intf.bgp_kws, "ip_version", 4) - ############################# - cmds = [] - num_routes = self.utils.intval(intf.bgp_kws, "num_routes", 0) - prefix = intf.bgp_kws.get("prefix", "") - if not prefix and num_routes > 0: - msg = "Prefix not specified num_routes={}".format(num_routes) - self.logger.error(msg) - else: - for _ in range(num_routes): - remote_ipv6_addr = intf.bgp_kws.get("remote_ipv6_addr", "") - if remote_ipv6_addr: 
- cmd = "announce route {}/128 next-hop self".format(prefix) - prefix = self.utils.incrementIPv6(prefix, "0:0:0:1::") - else: - cmd = "announce route {}/24 next-hop self".format(prefix) - prefix = self.utils.incrementIPv4(prefix, "0.0.1.0") - # append as-path sequence - if as_seq: cmd = cmd + "as-path [{}]".format(as_seq) - cmds.append(cmd) - self.utils.fwrite("\n".join(cmds), cmdfile) - ############################# - # TODO: batch routes - # announce attribute next-hop self nlri 100.10.0.0/16 100.20.0.0/16 - ############################# + # create route config + cmdfile = self.config_exabgp_route(enable, intf, index) # build router id from ns router_id = ns.replace("_", ".") + ".0" @@ -1410,13 +1438,50 @@ def config_exabgp_one(self, enable, intf, index=0): return True + def config_exabgp_route(self, enable, intf, index=0): + ns = "{}_{}".format(intf.name, index) + cmdfile = self.exabgpd_file(ns, "cmd") + cmds = [] + + for br in intf.bgp_routes.values(): + if not br.enable: continue + as_path = br.get("as_path", None) + as_seq = None + if as_path and "as_seq:" in as_path: + try: as_seq = int(as_path.replace("as_path:", "")) + except Exception: as_seq = None + + num_routes = self.utils.intval(br, "num_routes", 0) + prefix = br.get("prefix", "") + if not prefix and num_routes > 0: + msg = "Prefix not specified num_routes={}".format(num_routes) + self.error(msg) + else: + for _ in range(num_routes): + remote_ipv6_addr = intf.bgp_kws.get("remote_ipv6_addr", "") + if remote_ipv6_addr: + cmd = "announce route {}/128 next-hop self".format(prefix) + prefix = self.utils.incrementIPv6(prefix, "0:0:0:1::") + else: + cmd = "announce route {}/24 next-hop self".format(prefix) + prefix = self.utils.incrementIPv4(prefix, "0.0.1.0") + # append as-path sequence + if as_seq: cmd = cmd + "as-path [{}]".format(as_seq) + cmds.append(cmd) + self.utils.fwrite("\n".join(cmds), cmdfile) + ############################# + # TODO: batch routes + # announce attribute next-hop self nlri 
100.10.0.0/16 100.20.0.0/16 + ############################# + return cmdfile + def control_exabgp(self, intf): num_routes = self.utils.intval(intf.bgp_kws, "num_routes", 0) ns = "{}_{}".format(intf.name, 0) prefix = intf.bgp_kws.get("prefix", "") if not prefix: msg = "Prefix not specified num_routes={}".format(num_routes) - self.logger.error(msg) + self.error(msg) #return False #envfile = self.exabgpd_file(ns, "env") for _ in range(num_routes): @@ -1445,7 +1510,7 @@ def control_exabgp(self, intf): prefix = self.utils.incrementIPv4(prefix, "0.0.1.0") output = self.utils.cmdexec(cmd) if "could not send command to ExaBGP" in output: - self.logger.error(output) + self.error(output) return False return True @@ -1461,6 +1526,10 @@ def apply_bgp(self, op, enable, intf): retval = self.config_exabgp(True, intf) return retval + def apply_bgp_route(self, enable, route): + route.enable = enable + return self.apply_bgp("", True, route.intf) + def config_igmp(self, mode, intf, host): ns = "{}_{}".format(intf.name, 0) igmp_version = host.get("igmp_version", "v3") @@ -1468,7 +1537,7 @@ def config_igmp(self, mode, intf, host): ip4_addr = host.get("grp_ip_addr_start", "224.0.0.1") ip4_step = "0.0.1.0" - self.logger.error("TODO: config_igmp {} = {} {}".format(mode, intf.iface, host)) + self.error("TODO: config_igmp {} = {} {}".format(mode, intf.iface, host)) # force IGMP version if igmp_version == "v2": @@ -1497,8 +1566,10 @@ def config_igmp(self, mode, intf, host): #kwargs = ut_stream_get(0) #kwargs = ut_stream_get(0, mac_dst_mode='list', mac_dst=["00.00.00.00.00.02", "00.00.00.00.00.04", "00.00.00.00.00.06"]) - kwargs = ut_stream_get(0, mac_src_mode='list', mac_src="00.00.00.00.00.02 00.00.00.00.00.04 00.00.00.00.00.06") - s = ScapyStream(0, None, None, **kwargs) + #kwargs = ut_stream_get(0, mac_src_mode='list', mac_src="00.00.00.00.00.02 00.00.00.00.00.04 00.00.00.00.00.06") + kwargs = ut_stream_get(16) + s = ScapyStream(0, 0, None, None, **kwargs) + pwa = packet.build_first(s) 
while pwa: packet.logger.info("=======================================================") diff --git a/spytest/spytest/tgen/scapy/port.py b/spytest/spytest/tgen/scapy/port.py index 734d0ca05f6..25ad410a8fa 100755 --- a/spytest/spytest/tgen/scapy/port.py +++ b/spytest/spytest/tgen/scapy/port.py @@ -64,25 +64,39 @@ def __str__(self): return ''.join([('%s=%s' % x) for x in self.kws.items()]) class ScapyInterface(object): - def __init__(self, port, index, *args, **kws): + def __init__(self, port, index, handle, *args, **kws): self.port = port.name self.iface = port.iface self.index = index + self.handle = handle self.args = args self.kws = copy.copy(kws) self.enable = True self.name = "{}_{}".format(self.port, self.index) self.name = self.name.replace("/", "_") self.igmp_hosts = SpyTestDict() + self.bgp_routes = SpyTestDict() def add_igmp_host(self, *args, **kws): index = len(self.igmp_hosts) host_handle = "igmp-{}-{}-{}".format(self.port, self.index, index) host = SpyTestDict() + host.intf = self host.host_handle = host_handle host.update(kws) self.igmp_hosts[host_handle] = host - return host_handle + return host + + def add_bgp_route(self, *args, **kws): + index = len(self.bgp_routes) + route_handle = "bgp-route-{}-{}-{}".format(self.port, self.index, index) + route = SpyTestDict() + route.enable = True + route.intf = self + route.route_handle = route_handle + route.update(kws) + self.bgp_routes[route_handle] = route + return route class ScapyPort(object): def __init__(self, name, iface, dry=False, dbg=0, logger=None): @@ -91,6 +105,7 @@ def __init__(self, name, iface, dry=False, dbg=0, logger=None): self.iface = iface self.dry = dry self.dbg = dbg + self.errs = [] self.logger = logger or Logger() self.utils = Utils(self.dry, logger=self.logger) self.streams = SpyTestDict() @@ -126,6 +141,21 @@ def cleanup(self): self.driver.cleanup() self.clean_streams() + def get_alerts(self): + errs = [] + errs.extend(self.errs) + errs.extend(self.driver.get_alerts()) + self.errs = 
[] + return errs + + def get_all_handles(self): + handles = [] + for ih, intf in self.interfaces.items(): + handles.append(ih) + for brh in intf.bgp_routes: + handles.append(brh) + return handles + def set_admin_status(self, val): self.admin_status = val @@ -204,22 +234,22 @@ def traffic_config(self, track_port, *args, **kws): elif mode == "remove": stream_id = kws.get('stream_id', None) if stream_id not in self.streams: - self.error("invalid", "stream_id", stream_id) + self.error("invalid", "traffic_config-remove-stream_id", stream_id) self.driver.stopTransmit(handle = stream_id) elif mode == "enable": stream_id = kws.get('stream_id', None) if stream_id not in self.streams: - self.error("invalid", "stream_id", stream_id) + self.error("invalid", "traffic_config-enable-stream_id", stream_id) self.streams[stream_id].enable = True elif mode == "disable": stream_id = kws.get('stream_id', None) if stream_id not in self.streams: - self.error("invalid", "stream_id", stream_id) + self.error("invalid", "traffic_config-disable-stream_id", stream_id) self.streams[stream_id].enable = False elif mode == "modify": stream_id = kws.get('stream_id', None) if stream_id not in self.streams: - self.error("invalid", "stream_id", stream_id) + self.error("invalid", "traffic_config-modify-stream_id", stream_id) self.streams[stream_id].kws.update(kws) else: self.error("unsupported", "traffic_config: mode", mode) @@ -228,6 +258,9 @@ def traffic_config(self, track_port, *args, **kws): def find_interface(self, handle): if handle in self.interfaces: return self.interfaces[handle] + route = self.get_bgp_route(handle) + if route: + return route.intf return None def igmp_host_validate(self, handle): @@ -236,6 +269,19 @@ def igmp_host_validate(self, handle): return True return False + def get_bgp_route(self, handle): + for ih, intf in self.interfaces.items(): + if ih != handle: + for brh, br in intf.bgp_routes.items(): + if brh == handle: + return br + elif intf.bgp_routes: + for brh, br in 
intf.bgp_routes.items(): + return br + else: + return None + return None + def interface_validate(self, handle): return bool(handle in self.interfaces) @@ -255,7 +301,7 @@ def interface_config(self, *args, **kws): self.error("too large > 100", "count", count) index = len(self.interfaces) handle = self.interface_encode(index) - interface = ScapyInterface(self, index, *args, **kws) + interface = ScapyInterface(self, index, handle, *args, **kws) self.interfaces[handle] = interface self.driver.createInterface(interface) if count > 1: @@ -267,7 +313,7 @@ def interface_config(self, *args, **kws): if isinstance(handle, list): handle = handle[0] if not self.interface_validate(handle): - self.error("invalid", "handle", handle) + self.error("invalid", "interface_config-destroy-handle", handle) self.driver.deleteInterface(self.interfaces[handle]) del self.interfaces[handle] elif mode is None and send_ping and ping_dst: @@ -275,7 +321,7 @@ def interface_config(self, *args, **kws): if isinstance(handle, list): handle = handle[0] if not self.interface_validate(handle): - self.error("invalid", "protocol_handle", handle) + self.error("invalid", "interface_config-ping-protocol_handle", handle) #index = self.interfaces[handle].index #rv = self.driver.ping(self.interfaces[handle], ping_dst, index) rv = self.driver.ping(self.interfaces[handle], ping_dst, 0) @@ -286,7 +332,7 @@ def interface_config(self, *args, **kws): if isinstance(handle, list): handle = handle[0] if not self.interface_validate(handle): - self.error("invalid", "protocol_handle", handle) + self.error("invalid", "interface_config-arp-protocol_handle", handle) index = self.interfaces[handle].index self.driver.send_arp(self.interfaces[handle], index) res[self.port_handle] = SpyTestDict() @@ -304,10 +350,10 @@ def emulation_bgp_config(self, *args, **kws): if isinstance(handle, list): handle = handle[0] if not self.interface_validate(handle): - self.error("invalid", "handle", handle) + self.error("invalid", 
"emulation_bgp_config-handle", handle) intf = self.interfaces[handle] intf.bgp_kws = copy.copy(kws) - if mode == "start": + if mode == "enable": retval = self.driver.apply_bgp("config", True, intf) else: retval = self.driver.apply_bgp("config", False, intf) @@ -320,38 +366,56 @@ def emulation_bgp_config(self, *args, **kws): def emulation_bgp_route_config(self, *args, **kws): res = SpyTestDict() res.status = "1" - mode = kws.get('mode', None) - if mode in ["add", "remove"]: - handle = kws.get('handle', None) - res.handle = handle - if isinstance(handle, list): - handle = handle[0] + mode = kws.pop('mode', None) + if mode not in ["add", "remove"]: + self.error("unsupported", "emulation_bgp_route_config: mode", mode) + return res + + handle = kws.get('handle', None) + res.handle = handle + if isinstance(handle, list): + handle = handle[0] + + if mode == "add": if not self.interface_validate(handle): - self.error("invalid", "handle", handle) + self.error("invalid", "emulation_bgp_route_config-add-handle", handle) + return res intf = self.interfaces[handle] - intf.bgp_kws.update(kws) - if mode == "add": - retval = self.driver.apply_bgp("route", True, intf) - res["handles"] = [handle] - else: - retval = self.driver.apply_bgp("route", False, intf) - if not retval: - self.error("Failed", "emulation_bgp_route_config: mode", mode) + route = intf.add_bgp_route(*args, **kws) + retval = self.driver.apply_bgp_route(True, route) + if retval: + res.handle = route.route_handle else: - self.error("unsupported", "emulation_bgp_route_config: mode", mode) + try: + route = self.get_bgp_route(handle) + except Exception as exp: + self.error("failed to parge route", str(exp), handle) + if not route: + self.error("invalid", "emulation_bgp_route_config-remove-handle", handle) + return res + retval = self.driver.apply_bgp_route(False, route) + route.enable = False + + if not retval: + self.error("Failed", "emulation_bgp_route_config: mode", mode) return res def emulation_bgp_control(self, 
*args, **kws): res = SpyTestDict() res.status = "1" - mode = kws.get('mode', None) + mode = kws.pop('mode', None) if mode in ["start", "stop"]: handle = kws.get('handle', None) if isinstance(handle, list): handle = handle[0] - if not self.interface_validate(handle): - self.error("invalid", "handle", handle) - intf = self.interfaces[handle] + if self.interface_validate(handle): + intf = self.interfaces[handle] + else: + route = self.get_bgp_route(handle) + if not route: + self.error("invalid", "emulation_bgp_control-handle", handle) + return res + intf = route.intf if mode == "start": retval = self.driver.apply_bgp("control", True, intf) else: @@ -372,9 +436,10 @@ def emulation_igmp_config(self, *args, **kws): if isinstance(handle, list): handle = handle[0] if not self.interface_validate(handle): - self.error("invalid", "handle", handle) + self.error("invalid", "emulation_igmp_config-handle", handle) intf = self.interfaces[handle] - res.host_handle = intf.add_igmp_host(*args, **kws) + host = intf.add_igmp_host(*args, **kws) + res.host_handle = host.host_handle else: self.error("unsupported", "emulation_igmp_config: mode", mode) return res @@ -405,5 +470,6 @@ def emulation_igmp_control(self, *args, **kws): def error(self, etype, name, value): msg = "{}: {} = {}".format(etype, name, value) self.logger.error("=================== {} ==================".format(msg)) + self.errs.append(msg) raise ValueError(msg) diff --git a/spytest/spytest/tgen/scapy/pyro-service.py b/spytest/spytest/tgen/scapy/pyro-service.py index 444c423d443..8ee7a3c53db 100755 --- a/spytest/spytest/tgen/scapy/pyro-service.py +++ b/spytest/spytest/tgen/scapy/pyro-service.py @@ -59,13 +59,12 @@ def startNameServer(host, hmac=None): class CustomDaemon(Pyro4.Daemon): def clientDisconnect(self, conn): - # If required, you *can* override this to do custom resource freeing. 
- # But this is not needed if your resource objects have a proper 'close' method; - # this method is called by Pyro itself once the client connection gets closed. - # In this example this override is only used to print out some info. try: - print("client disconnects:", conn.sock.getpeername()) - print(" resources: ", [r.name for r in conn.tracked_resources]) + logger.info("client disconnects: %s", conn.sock.getpeername()) + logger.info("Remove Connection %s", id(conn)) + obj = conn.pyroInstances.get(PyRoScapyService) + logger.info("Remove Instance %s", id(obj)) + del obj except Exception: pass @@ -74,7 +73,7 @@ class PyRoScapyService(ScapyService): @classmethod def create_instance(cls): obj = cls() - print("Created {}".format(id(obj))) + logger.info("Created %s", id(obj)) obj.correlation_id = Pyro4.current_context.correlation_id return obj @@ -91,13 +90,13 @@ def main(): custom_daemon = CustomDaemon() break except Exception as exp: - print(exp) + logger.info(exp) custom_daemon = None time.sleep(2) if use_ns: startNameServer("0.0.0.0") - print("PYRO ScapyService started: {}".format(custom_daemon)) + logger.info("PYRO ScapyService started: %s", custom_daemon) Pyro4.Daemon.serveSimple( { PyRoScapyService: "scapy-tgen" diff --git a/spytest/spytest/tgen/scapy/server.py b/spytest/spytest/tgen/scapy/server.py index 6c44b40c1bd..d088c857f2f 100755 --- a/spytest/spytest/tgen/scapy/server.py +++ b/spytest/spytest/tgen/scapy/server.py @@ -10,26 +10,28 @@ from utils import Utils class ScapyServer(object): - def __init__(self, dry=False, dbg=0): + def __init__(self, dry=False, dbg=0, name="scpy-tgen"): self.node_name = "" self.dry = dry self.dbg = dbg + self.errs = [] self.ports = SpyTestDict() self.mgrps = SpyTestDict() self.msrcs = SpyTestDict() self.portmap = SpyTestDict() - if not dry: - os.system("ip -all netns del") - os.system("sysctl -w net.bridge.bridge-nf-call-arptables=0 >/dev/null 2>&1") - os.system("sysctl -w net.bridge.bridge-nf-call-ip6tables=0 >/dev/null 2>&1") 
- os.system("sysctl -w net.bridge.bridge-nf-call-iptables=0 >/dev/null 2>&1") model = os.getenv("SCAPY_TGEN_PORTMAP", "eth1") logs_root = os.getenv("SCAPY_TGEN_LOGS_PATH", "/tmp/scapy-tgen") time_spec = datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S_%f") logs_path = "{}/inst-{}".format(logs_root, time_spec) - self.logger = Logger(logs_dir=logs_path, dry=dry) + self.logger = Logger(logs_dir=logs_path, dry=dry, name=name) self.portmap_init(model) - self.logger.set_node_name(self.node_name) + self.logger.set_node_name(self.node_name, "init") + self.utils = Utils(self.dry, logger=self.logger) + if not dry: + self.utils.exec_cmd("ip -all netns del") + self.utils.exec_cmd("sysctl -w net.bridge.bridge-nf-call-arptables=0") + self.utils.exec_cmd("sysctl -w net.bridge.bridge-nf-call-ip6tables=0") + self.utils.exec_cmd("sysctl -w net.bridge.bridge-nf-call-iptables=0") def __del__(self): self.logger.debug("ScapyServer exiting...") @@ -39,7 +41,7 @@ def __del__(self): self.msrcs = SpyTestDict() def trace_api(self, *args, **kws): - print(self.node_name, args, kws) + self.logger.debug(self.node_name, args, kws) def cleanup_ports(self): for key in self.ports.keys(): @@ -65,10 +67,6 @@ def portmap_init(self, model): self.portmap["1/{}".format(i)] = "eth{}".format(i) self.portmap["{}".format(i)] = "eth{}".format(i) - def trace_args(self, *args, **kws): - func = sys._getframe(1).f_code.co_name - self.logger.debug(func, args, kws) - def trace_result(self, res, min_dbg=2): if self.dbg >= min_dbg: self.logger.debug("RESULT:", json.dumps(res)) @@ -77,17 +75,29 @@ def trace_result(self, res, min_dbg=2): def error(self, etype, name, value): msg = "{}: {} = {}".format(etype, name, value) self.logger.error("=================== {} ==================".format(msg)) + self.errs.append(msg) raise ValueError(msg) + def validate_node_name(self, node_name, *args, **kws): + func = sys._getframe(1).f_code.co_name + #self.logger.debug("validate_node_name:", self.node_name, node_name, func, args, kws) + 
self.logger.debug(node_name, func, args, kws) + if node_name != self.node_name: + self.logger.error("node name mismatch need {} got {}".format(self.node_name, node_name)) + return False + return True + def exposed_server_control(self, node_name, req, data, *args, **kws): - self.trace_args(req, data, *args, **kws) + func = sys._getframe(0).f_code.co_name + #self.logger.debug(self.node_name, node_name, func, req, data, args, kws) + self.logger.debug(node_name, func, req, data, args, kws) retval = "" - if req == "set-name": + if req == "set-name" and not self.node_name: self.node_name = data - self.logger.set_node_name(self.node_name) + self.logger.set_node_name(self.node_name, "set-name") return retval if node_name != self.node_name: - self.logger.error("node name mismatch need {} got {}".format(self.node_name, node_name)) + self.logger.error("node name mismatch need {} got {} for {}".format(self.node_name, node_name, req)) return retval if req == "set-dbg-lvl": self.dbg = int(data) @@ -96,17 +106,23 @@ def exposed_server_control(self, node_name, req, data, *args, **kws): elif req == "set-max-pps": os.environ["SPYTEST_SCAPY_MAX_RATE_PPS"] = str(data) elif req == "init-log": - self.logger.set_log_file(data) + retval = self.logger.set_log_file(data) + self.logger.banner(node_name, self.node_name, req, data) elif req == "read-log": retval = self.logger.get_log(data) elif req == "add-log": - self.logger.info("#"*80) - self.logger.info(data) - self.logger.info("#"*80) + self.logger.banner(node_name, self.node_name, req, data) elif req == "clean-all": for port in self.ports.values(): port.clean_streams() port.clean_interfaces() + elif req == "get-alerts": + errs = [] + errs.extend(self.errs) + for port in self.ports.values(): + errs.extend(port.get_alerts()) + self.errs = [] + retval = "\n".join(errs) elif req == "set-model": self.portmap_init(data) else: @@ -116,8 +132,8 @@ def exposed_server_control(self, node_name, req, data, *args, **kws): def fix_port_name(self, 
pname): return pname.split("/")[-1].strip() - def exposed_tg_connect(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_connect(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port_list = kws.get('port_list', []) res = SpyTestDict() res.port_handle = SpyTestDict() @@ -132,12 +148,13 @@ def exposed_tg_connect(self, *args, **kws): self.ports[pobj.port_handle] = pobj res.port_handle[pname0] = pobj.port_handle for pobj in delete_ports: + self.logger.debug("deleting handle {} iface {}".format(pobj.port_handle, pobj.iface)) del pobj res.status = 1 return self.trace_result(res) - def exposed_tg_disconnect(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_disconnect(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" res = SpyTestDict() self.cleanup_ports() res.status = 1 @@ -160,8 +177,8 @@ def port_traffic_control(self, port, complete, *args, **kws): handle = kws.pop('handle', None) complete.append([port, handle]) - def exposed_tg_traffic_control(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_traffic_control(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" handle = kws.pop('handle', None) port_handle = kws.pop('port_handle', None) stream_handle = kws.pop('stream_handle', None) @@ -212,8 +229,8 @@ def ensure_port_handle(self, port_handle): raise ValueError(msg) return self.ports[port_handle] - def exposed_tg_interface_control(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_interface_control(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port_handle = kws.get('port_handle', None) port = self.ensure_port_handle(port_handle) mode = kws.get('mode', "aggregate") @@ -227,8 +244,8 @@ def exposed_tg_interface_control(self, *args, **kws): return port.get_admin_status() self.error("Invalid", 
"mode", mode) - def exposed_tg_packet_control(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_packet_control(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port_handle = kws.get('port_handle', None) if not port_handle: msg = "port_handle is not specified" @@ -237,8 +254,8 @@ def exposed_tg_packet_control(self, *args, **kws): res = port.packet_control(*args, **kws) return self.trace_result(res) - def exposed_tg_packet_stats(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_packet_stats(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" res = SpyTestDict() port_handle = kws.get('port_handle', None) mode = kws.get('mode', "aggregate") @@ -258,8 +275,8 @@ def exposed_tg_packet_stats(self, *args, **kws): res[port_handle]["frame"][index]["frame"] = " ".join(pkt) return self.trace_result(res, 3) - def exposed_tg_traffic_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_traffic_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port_handle = kws.get('port_handle', None) stream_id = kws.get('stream_id', None) port_handle2 = kws.get('port_handle2', None) @@ -324,11 +341,11 @@ def find_intf(self, handle): return intf return None - def get_all_interfaces(self): + def get_all_handles(self): rv = {} for port in self.ports.values(): - for intf in port.interfaces: - rv[intf] = port + for handle in port.get_all_handles(): + rv[handle] = port return rv def verify_handle(self, *args, **kws): @@ -341,7 +358,7 @@ def verify_handle(self, *args, **kws): if port_handle: return self.ports[port_handle] - intfs = self.get_all_interfaces() + intfs = self.get_all_handles() if protocol_handle: handle = protocol_handle if isinstance(handle, list): handle = handle[0] if handle in intfs: return intfs[handle] @@ -352,43 +369,43 @@ def verify_handle(self, *args, **kws): 
else: self.error("Invalid", "handle", handle) - def exposed_tg_interface_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_interface_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port = self.verify_handle(*args, **kws) res = port.interface_config(*args, **kws) time.sleep(2) return self.trace_result(res) - def exposed_tg_emulation_bgp_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_bgp_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port = self.verify_handle(*args, **kws) res = port.emulation_bgp_config(*args, **kws) time.sleep(2) return self.trace_result(res) - def exposed_tg_emulation_bgp_route_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_bgp_route_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port = self.verify_handle(*args, **kws) res = port.emulation_bgp_route_config(*args, **kws) time.sleep(2) return self.trace_result(res) - def exposed_tg_emulation_bgp_control(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_bgp_control(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port = self.verify_handle(*args, **kws) res = port.emulation_bgp_control(*args, **kws) time.sleep(2) return self.trace_result(res) - def exposed_tg_emulation_igmp_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_igmp_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" port = self.verify_handle(*args, **kws) res = port.emulation_igmp_config(*args, **kws) time.sleep(2) return self.trace_result(res) - def exposed_tg_emulation_multicast_group_config(self, *args, **kws): - self.trace_args(*args, **kws) + def 
exposed_tg_emulation_multicast_group_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" mode = kws.get('mode', "create") index = len(self.mgrps) if mode == "create": @@ -403,8 +420,8 @@ def exposed_tg_emulation_multicast_group_config(self, *args, **kws): return self.trace_result(res) self.error("Invalid", "emulation_multicast_group_config: mode", mode) - def exposed_tg_emulation_multicast_source_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_multicast_source_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" mode = kws.get('mode', "create") index = len(self.msrcs) if mode == "create": @@ -419,8 +436,8 @@ def exposed_tg_emulation_multicast_source_config(self, *args, **kws): return self.trace_result(res) self.error("Invalid", "emulation_multicast_source_config: mode", mode) - def exposed_tg_emulation_igmp_group_config(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_igmp_group_config(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" mode = kws.get('mode', "create") if mode not in ["create", "clear_all"]: self.error("Invalid", "emulation_igmp_group_config: mode", mode) @@ -454,8 +471,8 @@ def exposed_tg_emulation_igmp_group_config(self, *args, **kws): return self.trace_result(res) self.error("Invalid", "emulation_igmp_group_config: session_handle", host_handle) - def exposed_tg_emulation_igmp_control(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_emulation_igmp_control(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" mode = kws.get('mode', "start") if mode not in ["start", "stop", "join", "leave"]: self.error("Invalid", "emulation_igmp_control: mode", mode) @@ -465,8 +482,8 @@ def exposed_tg_emulation_igmp_control(self, *args, **kws): res = 
port.emulation_igmp_control(*args, **kws) return self.trace_result(res) - def exposed_tg_traffic_stats(self, *args, **kws): - self.trace_args(*args, **kws) + def exposed_tg_traffic_stats(self, node_name, *args, **kws): + if not self.validate_node_name(node_name, *args, **kws): return "" res = SpyTestDict() res["status"] = "1" res["waiting_for_stats"] = "0" @@ -556,12 +573,12 @@ def fill_stats(self, res, tx_stats, rx_stats, detailed=False): Logger.setup() from ut_streams import ut_stream_get server = ScapyServer(True, dbg=3) - res = server.exposed_tg_connect(port_list=["1/1", "1/2"]) + res = server.exposed_tg_connect("", port_list=["1/1", "1/2"]) (tg_ph_1, tg_ph_2) = res.port_handle.values() kwargs = ut_stream_get(0, port_handle=tg_ph_1, mac_dst_mode='list', mac_dst=["00.00.00.00.00.02", "00.00.00.00.00.04"]) #res1 = server.exposed_tg_traffic_config(**kwargs) #server.exposed_tg_traffic_control(action="run", handle=res1["stream_id"]) - server.exposed_tg_interface_config (arp_send_req='1', src_mac_addr='00:00:00:00:00:02', + server.exposed_tg_interface_config ("", arp_send_req='1', src_mac_addr='00:00:00:00:00:02', vlan=1, intf_ip_addr='192.168.12.2', port_handle='port-1/2', mode='config', gateway='192.168.12.1', vlan_id=64) diff --git a/spytest/spytest/tgen/scapy/service.py b/spytest/spytest/tgen/scapy/service.py index 04c86478c97..3c3b8371230 100755 --- a/spytest/spytest/tgen/scapy/service.py +++ b/spytest/spytest/tgen/scapy/service.py @@ -1,13 +1,25 @@ import os import Pyro4 +#import threading +from datetime import datetime from server import ScapyServer @Pyro4.expose class ScapyService(object): def __init__(self): + print("scapy-service-start") dry = bool(os.getenv("SPYTEST_SCAPY_DRYRUN", "0") == "1") - self.server = ScapyServer(dry=dry) + #name = threading.current_thread().name + time_spec = datetime.utcnow().strftime("%Y_%m_%d_%H_%M_%S_%f") + name = "inst-{}".format(time_spec) + self.server = ScapyServer(dry=dry, name=name) + 
self.server.trace_api("scapy-service-started") + + def __del__(self): + self.server.trace_api("scapy-service-finish") + del self.server + self.server = None def server_control(self, *args, **kws): self.server.trace_api(*args, **kws) diff --git a/spytest/spytest/tgen/scapy/service.sh b/spytest/spytest/tgen/scapy/service.sh index 7b282716990..540090bb8e5 100755 --- a/spytest/spytest/tgen/scapy/service.sh +++ b/spytest/spytest/tgen/scapy/service.sh @@ -12,11 +12,16 @@ if [ -f $ROOT/service.pid ]; then rm -f $ROOT/service.pid fi +export TMPDIR=$ROOT/tmp/ +rm -rf $TMPDIR;mkdir $TMPDIR + mkdir -p $ROOT/logs VER=$(date +%Y_%d_%m-%H_%M_%S) -find $ROOT/logs/current -name "*.pid" | xargs -r -L 1 pkill -F -mv -f $ROOT/logs/current $ROOT/logs/$VER -mkdir -p $ROOT/logs/current +if [ -d $ROOT/logs/current ]; then + find $ROOT/logs/current -name "*.pid" | xargs -r -L 1 pkill -F + mv -f $ROOT/logs/current $ROOT/logs/$VER +fi +mkdir $ROOT/logs/current export HOME=$ROOT/logs/current export SCAPY_TGEN_LOGS_PATH=$HOME diff --git a/spytest/spytest/tgen/scapy/unit_test.py b/spytest/spytest/tgen/scapy/unit_test.py index 529865a6cf5..323021cbf53 100755 --- a/spytest/spytest/tgen/scapy/unit_test.py +++ b/spytest/spytest/tgen/scapy/unit_test.py @@ -4,7 +4,6 @@ import time import logging -from spytest.dicts import SpyTestDict from spytest.tgen.tg_scapy import ScapyClient import utilities.common as utils @@ -19,10 +18,7 @@ class TGScapyTest(ScapyClient): def __init__(self, tg_ip=None, tg_port=8009, tg_port_list=None): - self.tg_ip = tg_ip - self.tg_port_list = tg_port_list - self.tg_port_handle = SpyTestDict() - ScapyClient.__init__(self, None, tg_port) + ScapyClient.__init__(self, tg_ip, tg_port, tg_port_list) #os.environ["SCAPY_TGEN_PORTMAP"] = "vde" #self.scapy_connect() self.scapy_connect(True) diff --git a/spytest/spytest/tgen/scapy/ut_streams.py b/spytest/spytest/tgen/scapy/ut_streams.py index 53ddd4e7624..7f8014bbf86 100755 --- a/spytest/spytest/tgen/scapy/ut_streams.py +++ 
b/spytest/spytest/tgen/scapy/ut_streams.py @@ -64,6 +64,7 @@ def _build2(index=0): icmp_ndp_nam_r_flag=1, icmp_ndp_nam_s_flag=1, ipv6_hop_limit=255) if index == 15: return _build(rate_pps=1, l3_protocol='ipv4',ip_src_addr='11.1.1.1', ip_dst_addr='225.1.1.1',ip_protocol=2, \ l4_protocol='igmp',igmp_msg_type='report',igmp_group_addr='225.1.1.1',high_speed_result_analysis=0) + if index == 16: return _build(rate_pps=1, l3_protocol='ipv6', ipv6_src_addr='33f1::1', ipv6_dst_addr='7fe9::1', ipv6_dst_step='::1', ipv6_dst_mode='increment', ipv6_dst_count=10) return None def ut_stream_get(index=0, **kws): diff --git a/spytest/spytest/tgen/scapy/utils.py b/spytest/spytest/tgen/scapy/utils.py index 071f8d24111..a3d342386a3 100755 --- a/spytest/spytest/tgen/scapy/utils.py +++ b/spytest/spytest/tgen/scapy/utils.py @@ -1,10 +1,12 @@ import os import sys import time +import glob import random import socket import struct import inspect +import fnmatch import threading from collections import deque @@ -12,6 +14,11 @@ from tempfile import mkstemp import subprocess +try: + from StringIO import StringIO ## for Python 2 +except ImportError: + from io import StringIO ## for Python 3 + from logger import Logger class Utils(object): @@ -41,12 +48,12 @@ def fread(self, fname, default=""): def fwrite(self, content, fname = "", mode="w"): if fname == "" or self.dry: - _, fname = mkstemp() + tmp_dir = os.getenv("TMPDIR", "/tmp/scapy-tgen/tmp/") + Utils.ensure_folder(tmp_dir) + _, fname = mkstemp(prefix=tmp_dir) self.tmp_files.append(fname) else: - directory = os.path.dirname(fname) - if not os.path.exists(directory): - os.makedirs(directory) + Utils.ensure_parent(fname) with open(fname, mode) as fd: fd.write(content) return fname @@ -57,7 +64,7 @@ def fhead(self, fname, count, default=""): with open(fname, 'r') as fd: for line in islice(fd, count): lines.append(line) - return "".join(lines) + return "".join(lines).strip() except Exception: pass return default @@ -68,7 +75,7 @@ def ftail(self, 
fname, count, default=""): with open(fname, 'r') as fd: for line in deque(fd, maxlen=count): lines.append(line) - return "".join(lines) + return "".join(lines).strip() except Exception: pass return default @@ -79,14 +86,19 @@ def wc_l(self, fname): except Exception: return -1 - def cmdexec(self, cmd, msg=None): - self.logger.debug("cmdexec: " + cmd) - if self.dry: return "skipped-for-dry-run" + @staticmethod + def process_exec(cmd): p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) (out, err) = p.communicate() p.wait() return out or err + def cmdexec(self, cmd, msg=None, dbg=True): + if dbg: + self.logger.debug("cmdexec: " + cmd) + if self.dry: return "skipped-for-dry-run" + return self.process_exec(cmd) + def shexec(self, *args): cmds = "".join(args) fname = self.fwrite(cmds) @@ -114,11 +126,9 @@ def tshexec(self, cmdlist): th.join() def cat_file(self, filepath): - marker = "\n#######################" - content = marker + " cat {}\n".format(filepath) - content = content + self.fread(filepath) - content = content + marker + "\n" - return content + marker = "#######################" + content = self.fread(filepath).strip() + return "\n{0} {1}\n{0}\n".format(marker, content) def line_info(self): stk = inspect.stack() @@ -298,3 +308,58 @@ def make_list(arg): return arg return [arg] + def exec_func(self, func, *args, **kwargs): + this_stderr, this_stdout = StringIO(), StringIO() + save_stderr, save_stdout = sys.stderr, sys.stdout + sys.stderr, sys.stdout = this_stderr, this_stdout + func(*args, **kwargs) + sys.stderr, sys.stdout = save_stderr, save_stdout + msgs = map(str.strip, [this_stdout.getvalue(), this_stderr.getvalue()]) + return self.logger.debug("\n".join([s for s in msgs if s])) + + def exec_cmd(self, cmd): + ret = self.cmdexec(cmd, dbg=False) + ret = "\n".join([s for s in ret.split("\n") if s]) + return self.logger.debug(ret) + + @staticmethod + def list_files_tree(dir_path, pattern="*"): + matches = [] + for root, _, 
filenames in os.walk(dir_path): + for filename in fnmatch.filter(filenames, pattern): + matches.append(os.path.join(root, filename)) + return matches + + @staticmethod + def list_files(entry, pattern="*"): + if os.path.isdir(entry): + return Utils.list_files_tree(entry, pattern) + if os.path.isfile(entry): + return [entry] + return glob.glob(entry) + + @staticmethod + def ensure_folder(path): + path = os.path.abspath(path) + if not os.path.exists(path): + os.makedirs(path) + + @staticmethod + def ensure_parent(filename): + path = os.path.dirname(filename) + Utils.ensure_folder(path) + + @staticmethod + def get_ip_addr_dev(intf, ns=None): + cmd = "ip addr show dev {}".format(intf) + if ns: cmd = "ip netns exec {} {}".format(ns, cmd) + output = Utils.process_exec(cmd).split() + retval = {} + for x in ['inet', 'inet6', 'state', 'link/ether', 'ether']: + if x in output: + idx = output.index(x) + retval[x] = output[idx+1] + print(cmd, output, retval) + return retval + + diff --git a/spytest/spytest/tgen/tg.py b/spytest/spytest/tgen/tg.py index cc17e0d5cb1..20f2857fce4 100644 --- a/spytest/spytest/tgen/tg.py +++ b/spytest/spytest/tgen/tg.py @@ -1,6 +1,5 @@ import os import re -import sys import time import json import copy @@ -11,6 +10,7 @@ import utilities.parallel as putils from spytest.logger import Logger from spytest.tgen.init import tg_stc_load,tg_scapy_load,tg_ixia_load +from spytest.tgen.tg_stubs import TGStubs from spytest.tgen.tg_scapy import ScapyClient from spytest.dicts import SpyTestDict from netaddr import IPAddress @@ -103,8 +103,21 @@ def get_ixiangpf(): def get_ixnet(): return get_ixiangpf().ixnet -class TGBase(object): +def connect_retry(tg): + for i in range(0, 10): + ret_ds = tg.connect() + if ret_ds: + msg = "UNKNOWN" if "log" not in ret_ds else ret_ds.get('log', '') + logger.warning('TG Connect Error: %s try: %d' % (msg, i)) + tgen_wait(10) + else: + logger.info('TG Connection: Success') + return True + return False + +class TGBase(TGStubs): def 
__init__(self, tg_type, tg_version, tg_ip=None, tg_port_list=None): + TGStubs.__init__(self, logger) logger.info('TG Base Init...start') self.tg_ns = "" self.tg_type = tg_type @@ -122,17 +135,6 @@ def __init__(self, tg_type, tg_version, tg_ip=None, tg_port_list=None): if self.skip_traffic: return - for i in range(0, 10): - ret_ds = self.connect() - if ret_ds: - msg = "UNKNOWN" if "log" not in ret_ds else ret_ds['log'] - logger.warning('TG Connect Error: %s try: %d' % (msg, i)) - tgen_wait(10) - else: - logger.info('TG Connection: Success') - self.tg_connected = True - break - def manage_interface_config_handles(self, mode, port_handle, handle): logger.debug("manage_interface_config_handles: {} {} {}".format(mode, port_handle, handle)) if port_handle is None or handle is None: @@ -155,16 +157,6 @@ def ensure_traffic_control(self, timeout=60, skip_fail=False, **kwargs): def ensure_traffic_stats(self, timeout=60, skip_fail=False, **kwargs): pass - def clean_all(self): - return self.override() - - def connect(self): - self.override() - return None - - def show_status(self): - return self.override() - def instrument(self, phase, context): pass @@ -250,6 +242,12 @@ def tgen_eval(self, msg, func, **kwargs): def get_port_handle(self, port): return self.tg_port_handle.get(port, None) + def set_port_handle(self, port, value): + if port: + self.tg_port_handle[port] = value + else: + self.tg_port_handle.clear() + def get_port_handle_list(self): ph_list = list() for _, handle in self.tg_port_handle.items(): @@ -380,7 +378,6 @@ def trgen_pre_proc(self, fname, **kwargs): op_type = kwargs.pop('output_type',None) if self.tg_type == 'ixia': self.get_capture_stats_state(kwargs.get('port_handle')) - if self.ix_port == '443': tgen_wait(5, 'waiting to stabilize the captured packets') if op_type == 'hex': func = self.get_hltapi_name('self.local_get_captured_packets') else: @@ -422,6 +419,8 @@ def trgen_pre_proc(self, fname, **kwargs): if "status" not in ret_ds: logger.warning(ret_ds) msg 
= "Unknown" if "log" not in ret_ds else ret_ds['log'] + if self.tg_type == 'stc' and not ret_ds: + tgen_abort("nolog", "tgen_failed_abort", str(msg)) self.fail("nolog", "tgen_failed_api", msg) elif ret_ds['status'] == '1': logger.debug('TG API Run Status: Success') @@ -626,48 +625,11 @@ def trgen_adjust_mismatch_params(self, fname, **kwargs): pass def local_stc_tapi_call(self,param): pass - def override(self, name=None): - name = name or sys._getframe(1).f_code.co_name - logger.error("{} should be overriden".format(name)) - return {} - def tg_interface_handle(self, ret_ds): - return self.override() - def tg_interface_config(self, **kwargs): - return self.override() - def tg_test_control(self, **kwargs): - return self.override() - def tg_packet_control(self, **kwargs): - return self.override() - def tg_traffic_control(self, **kwargs): - return self.override() - def tg_topology_config(self, **kwargs): - return self.override() - def tg_packet_stats(self, **kwargs): - return self.override() - def tg_traffic_stats(self, **kwargs): - return self.override() - def tg_interface_stats(self, **kwargs): - return self.override() - def tg_packet_config_buffers(self, **kwargs): - return self.override() - def tg_packet_config_filter(self, **kwargs): - return self.override() - def tg_packet_config_triggers(self, **kwargs): - return self.override() - def tg_convert_porthandle_to_vport(self, **kwargs): - return self.override() - def tg_protocol_info(self, **kwargs): - return self.override() - def tg_withdraw_bgp_routes(self, route_handle): - return self.override() - def tg_readvertise_bgp_routes(self, handle, route_handle): - return self.override() - def tg_emulation_ospf_config(self, **kwargs): - return self.override() class TGStc(TGBase): def __init__(self, tg_type, tg_version, tg_ip=None, tg_port_list=None): TGBase.__init__(self, tg_type, tg_version, tg_ip, tg_port_list) + self.tg_connected = connect_retry(self) logger.info('TG STC Init...done') def clean_all(self): @@ -705,7 
+667,7 @@ def connect(self): ret_ds = get_sth().connect(device=self.tg_ip, port_list=self.tg_port_list, break_locks=1) logger.info(ret_ds) - if ret_ds['status'] != '1': + if ret_ds.get('status') != '1': return ret_ds port_handle_list=[] for port in self.tg_port_list: @@ -834,9 +796,9 @@ def trgen_adjust_mismatch_params(self, fname, **kwargs): kwargs['enable_ping_response'] = 1 elif fname == 'tg_emulation_bgp_config': if kwargs.get('enable_4_byte_as') != None: - l_as = int(kwargs['local_as']) / 65536 + l_as = int(int(kwargs['local_as']) / 65536) l_nn = int(kwargs['local_as']) - (l_as * 65536) - r_as = int(kwargs['remote_as']) / 65536 + r_as = int(int(kwargs['remote_as']) / 65536) r_nn = int(kwargs['remote_as']) - (r_as * 65536) kwargs['local_as4'] = str(l_as)+":"+str(l_nn) kwargs['remote_as4'] = str(r_as)+":"+str(r_nn) @@ -906,10 +868,10 @@ def tg_disconnect(self,**kwargs): port_handle_list = self.get_port_handle_list() ret_ds = get_sth().cleanup_session(port_handle=port_handle_list) logger.info(ret_ds) - if ret_ds['status'] == '1': + if ret_ds.get('status') == '1': logger.debug('TG API Run Status: Success') else: - logger.warning('TG API Error: %s' % ret_ds['log']) + logger.warning('TG API Error: %s' % ret_ds.get('log', '')) self.tg_connected = False self.tg_port_handle.clear() @@ -1163,6 +1125,7 @@ def __init__(self, tg_type, tg_version, tg_ip=None, tg_port_list=None, ix_server self.topo_handle = {} self.traffic_config_handles = {} TGBase.__init__(self, tg_type, tg_version, tg_ip, tg_port_list) + self.tg_connected = connect_retry(self) logger.info('TG Ixia Init...done') def clean_all(self): @@ -2024,6 +1987,7 @@ def get_capture_stats_state(self, port): break state = get_ixnet().getAttribute(capture, cap_type) logger.info("Total time taken to capture ready {} sec".format(time.time() - start)) + if self.ix_port == '443': tgen_wait(5, 'waiting to stabilize the captured packets') def get_emulation_handle_prefixes(self, ret_ds, kwargs): ip_dict = dict() @@ -2047,11 
+2011,18 @@ def get_emulation_handle_prefixes(self, ret_ds, kwargs): logger.info('IP PREFIXES: {}'.format(ip_dict)) -class TGScapy(ScapyClient, TGBase): +class TGScapy(TGBase): def __init__(self, tg_type, tg_version, tg_ip=None, tg_port=8009, tg_port_list=None): logger.info('TG Scapy Init') - ScapyClient.__init__(self, logger, tg_port) TGBase.__init__(self, tg_type, tg_version, tg_ip, tg_port_list) + self.sc = ScapyClient(logger, tg_ip, tg_port, tg_port_list, self) + self.tg_connected = connect_retry(self) + + def __getattribute__(self, name): + try: + return object.__getattribute__(self.sc, name) + except Exception: + return object.__getattribute__(self, name) def clean_all(self): self.server_control("clean-all", "") diff --git a/spytest/spytest/tgen/tg_scapy.py b/spytest/spytest/tgen/tg_scapy.py index 19493a11a19..1feb46c38e4 100644 --- a/spytest/spytest/tgen/tg_scapy.py +++ b/spytest/spytest/tgen/tg_scapy.py @@ -1,60 +1,94 @@ import os import sys import copy +import time import logging import spytest.paths as paths class ScapyClient(object): - def __init__(self, logger, port=8009): + def __init__(self, logger, tg_ip=None, tg_port=8009, tg_port_list=None, base=None): + self.base = base self.conn = None - self.tg_port = port self.logger = logger or logging.getLogger() self.use_pyro = True self.node_name = "" self.filemode = bool(os.getenv("SPYTEST_FILE_MODE", "0") != "0") - self.tg_ip = getattr(self, "tg_ip", None) - self.tg_port_list = getattr(self, "tg_port_list", []) - self.tg_port_handle = getattr(self, "tg_port_handle", {}) + self.tg_ip = tg_ip + self.tg_port = tg_port + self.tg_port_list = tg_port_list or [] + self.tg_port_handle = {} root = os.path.join(os.path.dirname(__file__), '..') root = os.path.abspath(root) sys.path.append(os.path.join(root)) - def log_call(self, fname, **kwargs): - opts = ', '.join(['{}={!r}'.format(k, v) for k, v in kwargs.items()]) - self.logger.info("TODO {} {}".format(fname, opts)) + def _set_port_handle(self, port, value): + if 
self.base: + self.base.set_port_handle(port, value) + elif not port: + self.tg_port_handle.clear() + else: + self.tg_port_handle[port] = value + + def _log_call(self, fname, **kwargs): + if self.base: + self.base.log_call(fname, **kwargs) + else: + opts = ', '.join(['{}={!r}'.format(k, v) for k, v in kwargs.items()]) + self.logger.info("TODO {} {}".format(fname, opts)) def save_log(self, name, data): - self.logger.info("TODO {} {}".format(name, data)) + if self.base: + self.base.save_log(name, data) + else: + self.logger.info("TODO {} {}".format(name, data)) - def api_fail(self, msg): - self.logger.info("TODO {}".format(msg)) + def _api_fail(self, msg): + if self.base: + self.base.api_fail(msg) + else: + self.logger.info("TODO {}".format(msg)) def log_info(self, *args): self.logger.info(*args) + def log_remote_alerts(self, phase): + func_name = "server_control" + errs1 = "==================== REMOTE ALERTS ============================" + errs2 = self._execute(func_name, self.conn.server_control, "get-alerts", "") + errs3 = "===============================================================" + if errs2.strip(): + self.logger.info("\n".join(["\n",errs1, errs2, errs3,"\n"])) + def server_control(self, phase, context): + func_name = "server_control" if self.filemode: return elif phase == "clean-all": - self.execute(self.conn.server_control, "clean-all", "") + self._execute(func_name, self.conn.server_control, "clean-all", "") elif phase == "pre-test": - self.execute(self.conn.server_control, "add-log", "test-start " + context) + self._execute(func_name, self.conn.server_control, "add-log", "test-start {} {}".format(context, self.node_name)) + self.log_remote_alerts(phase) elif phase == "post-test": - self.execute(self.conn.server_control, "add-log", "test-finish " + context) + self._execute(func_name, self.conn.server_control, "add-log", "test-finish {} {}".format(context, self.node_name)) + self.log_remote_alerts(phase) elif phase == "pre-module-prolog": - 
self.execute(self.conn.server_control, "init-log", context) + local_file = "{}.tgen".format(paths.get_mlog_name(context)) + path = self._execute(func_name, self.conn.server_control, "init-log", local_file) + self.logger.info("Server Logs Path {}".format(path)) + self.log_remote_alerts(phase) elif phase == "post-module-epilog": self.log_info("ScapyClient instrument: {} {}".format(phase, context)) local_file = "{}.tgen".format(paths.get_mlog_name(context)) self.log_info("ScapyClient instrument: {} {}".format(phase, local_file)) try: - data = self.execute(self.conn.server_control, "read-log", context) + data = self._execute(func_name, self.conn.server_control, "read-log", local_file) self.save_log(local_file, data) except Exception as exp: self.log_info("Failed to read log {} {}".format(context, str(exp))) + self.log_remote_alerts(phase) else: self.log_info("ScapyClient instrument: ignored {} {}".format(phase, context)) @@ -91,41 +125,55 @@ def scapy_connect(self, dry_run=False): except Exception: max_pps = 100 model = os.getenv("SCAPY_TGEN_PORTMAP", "eth1") self.node_name = os.getenv("PYTEST_XDIST_WORKER", "") - self.execute(self.conn.server_control, "set-name", self.node_name) - self.execute(self.conn.server_control, "set-dbg-lvl", dbg_lvl) - self.execute(self.conn.server_control, "set-dry-run", dry_run) - self.execute(self.conn.server_control, "set-max-pps", max_pps) - self.execute(self.conn.server_control, "set-model", model) - self.execute(self.conn.server_control, "init-log", "default") + func_name = "server_control" + for _ in range(5): + try: + self._execute(func_name, self.conn.server_control, "set-name", self.node_name) + break + except Exception: + time.sleep(10) + path = self._execute(func_name, self.conn.server_control, "init-log", "default") + self.logger.info("Server Logs Path {}".format(path)) + self._execute(func_name, self.conn.server_control, "set-dbg-lvl", dbg_lvl) + self._execute(func_name, self.conn.server_control, "set-dry-run", dry_run) + 
self._execute(func_name, self.conn.server_control, "set-max-pps", max_pps) + self._execute(func_name, self.conn.server_control, "set-model", model) res = self.tg_connect(port_list=self.tg_port_list) - self.tg_port_handle.clear() + self._set_port_handle(None, None) for port in self.tg_port_list: - self.tg_port_handle[port] = res['port_handle'][port] + self._set_port_handle(port, res['port_handle'][port]) return None - def log_api(self, *args, **kws): + def log_api(self, *args, **kwargs): func = sys._getframe(1).f_code.co_name - self.log_call(func, **kws) + self._log_call(func, **kwargs) - def fix_newstr(self, kws): + def fix_newstr(self, kwargs): from future.types import newstr - for key, value in kws.items(): + for key, value in kwargs.items(): if isinstance(value, newstr): - kws[key] = str(value) + kwargs[key] = str(value) + + def execute(self, func, *args, **kwargs): + func_name = sys._getframe(1).f_code.co_name + return self._execute(func_name, func, *args, **kwargs) - def execute(self, func, *args, **kws): + def _execute(self, func_name, func, *args, **kwargs): + #msg1 = " ".join(map(str,args)) + #msg2 = ','.join(['{}={!r}'.format(k,v) for k,v in kwargs.items()]) + #self.log_info("ScapyClient: {} {} {} {}".format(self.node_name, func_name, msg1, msg2)) try: - res = func(self.node_name, *args, **kws) + res = func(self.node_name, *args, **kwargs) return copy.copy(res) except Exception as exp: msg = "{}".format(exp) if self.use_pyro: import Pyro4 msg = msg + "".join(Pyro4.util.getPyroTraceback()) - self.api_fail(msg) + self._api_fail(msg) raise exp - def sim_execute(self, *args, **kws): + def sim_execute(self, *args, **kwargs): func_name = sys._getframe(1).f_code.co_name sim_id = getattr(self, "sim_id", 0) + 1 self.sim_id = sim_id @@ -143,8 +191,8 @@ def sim_execute(self, *args, **kws): return {"mul_source_handle" : str(sim_id)} elif func_name == "tg_traffic_stats": rv = {"status" : "1", "traffic_item": {}} - tx_ph = kws.get("port_handle", None) - mode = 
kws.get("mode", "aggregate") + tx_ph = kwargs.get("port_handle", None) + mode = kwargs.get("mode", "aggregate") tx_ph_value = {mode: {}} tx_ph_value[mode]["tx"] = {"raw_pkt_count":0, "total_pkts":0, "pkt_byte_count":0} tx_ph_value[mode]["rx"] = {"total_pkts":0, "pkt_byte_count":0} @@ -152,73 +200,73 @@ def sim_execute(self, *args, **kws): return rv return None - def tg_connect(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_connect, *args, **kws) - def tg_disconnect(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_disconnect, *args, **kws) - def tg_traffic_control(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_traffic_control, *args, **kws) - def tg_interface_control(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_interface_control, *args, **kws) - def tg_packet_control(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_packet_control, *args, **kws) - def tg_packet_stats(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_packet_stats, *args, **kws) - def tg_traffic_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - self.fix_newstr(kws) - return self.execute(self.conn.tg_traffic_config, *args, **kws) - def tg_interface_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_interface_config, *args, **kws) - def tg_traffic_stats(self, *args, **kws): - 
self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_traffic_stats, *args, **kws) - def tg_emulation_bgp_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_bgp_config, *args, **kws) - def tg_emulation_bgp_route_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_bgp_route_config, *args, **kws) - def tg_emulation_bgp_control(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_bgp_control, *args, **kws) - def tg_emulation_igmp_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_igmp_config, *args, **kws) - def tg_emulation_multicast_group_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_multicast_group_config, *args, **kws) - def tg_emulation_multicast_source_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_multicast_source_config, *args, **kws) - def tg_emulation_igmp_group_config(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_igmp_group_config, *args, **kws) - def tg_emulation_igmp_control(self, *args, **kws): - self.log_api(*args, **kws) - if self.filemode: return self.sim_execute(*args, **kws) - return self.execute(self.conn.tg_emulation_igmp_control, *args, **kws) + def tg_connect(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: 
return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_connect, *args, **kwargs) + def tg_disconnect(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_disconnect, *args, **kwargs) + def tg_traffic_control(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_traffic_control, *args, **kwargs) + def tg_interface_control(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_interface_control, *args, **kwargs) + def tg_packet_control(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_packet_control, *args, **kwargs) + def tg_packet_stats(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_packet_stats, *args, **kwargs) + def tg_traffic_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + self.fix_newstr(kwargs) + return self.execute(self.conn.tg_traffic_config, *args, **kwargs) + def tg_interface_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_interface_config, *args, **kwargs) + def tg_traffic_stats(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_traffic_stats, *args, **kwargs) + def tg_emulation_bgp_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return 
self.execute(self.conn.tg_emulation_bgp_config, *args, **kwargs) + def tg_emulation_bgp_route_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_bgp_route_config, *args, **kwargs) + def tg_emulation_bgp_control(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_bgp_control, *args, **kwargs) + def tg_emulation_igmp_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_igmp_config, *args, **kwargs) + def tg_emulation_multicast_group_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_multicast_group_config, *args, **kwargs) + def tg_emulation_multicast_source_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_multicast_source_config, *args, **kwargs) + def tg_emulation_igmp_group_config(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_igmp_group_config, *args, **kwargs) + def tg_emulation_igmp_control(self, *args, **kwargs): + self.log_api(*args, **kwargs) + if self.filemode: return self.sim_execute(*args, **kwargs) + return self.execute(self.conn.tg_emulation_igmp_control, *args, **kwargs) diff --git a/spytest/spytest/tgen/tg_stubs.py b/spytest/spytest/tgen/tg_stubs.py new file mode 100644 index 00000000000..cbbabe6dc39 --- /dev/null +++ b/spytest/spytest/tgen/tg_stubs.py @@ -0,0 +1,66 @@ +import sys + +class TGStubs(object): + def __init__(self, logger): + self.logger = logger + def 
override(self, name=None): + name = name or sys._getframe(1).f_code.co_name + self.logger.error("{} should be overriden".format(name)) + return {} + def clean_all(self): + return self.override() + def show_status(self): + return self.override() + def tg_interface_handle(self, ret_ds): + return self.override() + def tg_interface_config(self, *args, **kwargs): + return self.override() + def tg_test_control(self, **kwargs): + return self.override() + def tg_packet_control(self, *args, **kwargs): + return self.override() + def tg_traffic_control(self, *args, **kwargs): + return self.override() + def tg_topology_config(self, **kwargs): + return self.override() + def tg_packet_stats(self, *args, **kwargs): + return self.override() + def tg_traffic_stats(self, *args, **kwargs): + return self.override() + def tg_interface_stats(self, **kwargs): + return self.override() + def tg_packet_config_buffers(self, **kwargs): + return self.override() + def tg_packet_config_filter(self, **kwargs): + return self.override() + def tg_packet_config_triggers(self, **kwargs): + return self.override() + def tg_convert_porthandle_to_vport(self, **kwargs): + return self.override() + def tg_protocol_info(self, **kwargs): + return self.override() + def tg_withdraw_bgp_routes(self, route_handle): + return self.override() + def tg_readvertise_bgp_routes(self, handle, route_handle): + return self.override() + def tg_emulation_bgp_config(self, **kwargs): + return self.override() + def tg_emulation_bgp_control(self, **kwargs): + return self.override() + def tg_emulation_bgp_route_config(self, **kwargs): + return self.override() + def tg_emulation_ospf_config(self, **kwargs): + return self.override() + def tg_emulation_ospf_lsa_config(self, **kwargs): + return self.override() + def tg_emulation_ospf_network_group_config(self, **kwargs): + return self.override() + def tg_emulation_ospf_topology_route_config(self, **kwargs): + return self.override() + def tg_emulation_igmp_control(self, **kwargs): + 
return self.override() + def tg_emulation_igmp_querier_control(self, **kwargs): + return self.override() + def tg_emulation_multicast_source_config(self, **kwargs): + return self.override() + diff --git a/spytest/spytest/tgen/tgen_utils.py b/spytest/spytest/tgen/tgen_utils.py index 992675affbe..8f58f78a4b0 100644 --- a/spytest/spytest/tgen/tgen_utils.py +++ b/spytest/spytest/tgen/tgen_utils.py @@ -42,7 +42,7 @@ def _validate_parameters(tr_details): return True -def get_counter_name(mode,tg_type,comp_type,direction): +def get_counter_name(mode,tg_type,comp_type,direction,logger=True): tg_type2 = "ixia" if tg_type == "scapy" else tg_type traffic_counters = { 'aggregate': { @@ -126,7 +126,8 @@ def get_counter_name(mode,tg_type,comp_type,direction): } counter_name = traffic_counters[mode][tg_type2][direction][comp_type] - st.debug('TG type: {}, Comp_type: {}, Direction: {}, Counter_name: {}'.format(tg_type, comp_type, direction, counter_name)) + if logger: + st.debug('TG type: {}, Comp_type: {}, Direction: {}, Counter_name: {}'.format(tg_type, comp_type, direction, counter_name)) return counter_name @@ -150,10 +151,13 @@ def _fetch_stats(obj, port, mode, comp_type, direction, stream_elem=None): """ stats_mode = 'streamblock' if mode in ['streams', 'traffic_item'] else mode - counter_name = get_counter_name(stats_mode, obj.tg_type, comp_type, direction) + counter_name = get_counter_name(stats_mode, obj.tg_type, comp_type, direction, logger=False) tx_counter = int() stats = dict() - for loop in range(1, 4): + for loop in range(0, 4): + if loop > 0: + st.log('TG stats are not fully ready. Trying to fetch stats again.... 
iteration {}'.format(loop)) + st.wait(2, 'waiting before fetch stats again') stats = obj.tg_traffic_stats(port_handle=port, mode=mode) if obj.tg_type == 'stc': if mode == 'streams': @@ -172,10 +176,6 @@ def _fetch_stats(obj, port, mode, comp_type, direction, stream_elem=None): tx_counter = 0 if stats.get('waiting_for_stats', '1') == '0' and (direction == 'rx' or tx_counter != 0): break - - st.wait(2, 'waiting before fetch stats again') - st.log('TG stats are not fully ready. Trying to fetch stats again.... iteration {}'.format(loop)) - if direction == 'tx' and not tx_counter: (name, value) = ('stream handle', stream_elem) if stream_elem else ('port', port) msg = 'TX Counter {} is 0 (zero) for {}: {}'.format(counter_name, name, value) @@ -192,17 +192,6 @@ def _verify_aggregate_stats(tr_details,mode,comp_type,tolerance_factor,delay_fac delay = 5 * float(delay_factor) tolerance = 5 * float(tolerance_factor) - ################### scapy ###################### - for tr_pair in range(1,len(tr_details)+1): - tr_pair = str(tr_pair) - tx_objs = tr_details[tr_pair]['tx_obj'] - for obj in tx_objs: - if obj.tg_type == 'scapy': - delay = 1 * delay - break - break - ################### scapy ###################### - st.tg_wait(delay, "aggregate_stats") port_stats = dict() @@ -214,44 +203,61 @@ def _verify_aggregate_stats(tr_details,mode,comp_type,tolerance_factor,delay_fac rx_ports = tr_details[tr_pair]['rx_ports'] rx_obj = tr_details[tr_pair]['rx_obj'] + cmsg = 'Pair: {}, Transmit Ports: {}, Receive Ports: {}'.format(tr_pair, tx_ports, rx_ports) st.log('Validating Traffic, Pair: {}, Transmit Ports: {}, Receive Ports: {}'.format(tr_pair,tx_ports,rx_ports)) - exp_val = 0 - for port,obj,ratio in zip(tx_ports,tx_obj,exp_ratio): - tx_ph = obj.get_port_handle(port) - # tx_stats = obj.tg_traffic_stats(port_handle=tx_ph, mode=mode) - if tx_ph not in port_stats: - port_stats[tx_ph] = _fetch_stats(obj, tx_ph, mode, comp_type, 'tx') - tx_stats = port_stats[tx_ph] - #st.debug(tx_stats) - 
counter_name = get_counter_name(mode,obj.tg_type,comp_type,'tx') - cur_tx_val = int(tx_stats[tx_ph][mode]['tx'][counter_name]) - st.log('Transmit counter_name: {}, counter_val: {}'.format(counter_name,cur_tx_val)) - exp_val += cur_tx_val * ratio - st.log('Total Tx from ports {}: {}'.format(tx_ports,exp_val)) - - for port,obj in zip(rx_ports,rx_obj): - rx_ph = obj.get_port_handle(port) - # rx_stats = obj.tg_traffic_stats(port_handle=rx_ph, mode=mode) - if rx_ph not in port_stats: - port_stats[rx_ph] = _fetch_stats(obj, rx_ph, mode, comp_type, 'rx') - rx_stats = port_stats[rx_ph] - #st.debug(rx_stats) - counter_name = get_counter_name(mode,obj.tg_type,comp_type,'rx') - real_rx_val = int(rx_stats[rx_ph][mode]['rx'][counter_name]) - st.log('Receive counter_name: {}, counter_val: {}'.format(counter_name,real_rx_val)) - - st.log('Total Rx on ports {}: {}'.format(rx_ports, real_rx_val)) - - diff = (abs(exp_val - real_rx_val) * 100.0) / exp_val if exp_val > 0 else abs(real_rx_val * 100.0 / cur_tx_val) - if diff <= tolerance: - st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}'.format('Success',tr_pair,tx_ports,rx_ports)) - st.log('Got expected values; Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) - ret_all.append(True) - else: + retry_count = retry + 1 if not retry and comp_type == 'packet_rate' else retry + tx_ph, rx_ph = None, None + for loop in range(0, int(retry_count) + 1): + if loop > 0: + st.log('The difference is not in the given tolerance. 
So, retrying the stats fetch once again....{}'.format(loop)) + st.wait(2, 'waiting before fetching stats again') + for port in [tx_ph, rx_ph]: port_stats.pop(port, '') + exp_val = 0 + for port,obj,ratio in zip(tx_ports,tx_obj,exp_ratio): + tx_ph = obj.get_port_handle(port) + # tx_stats = obj.tg_traffic_stats(port_handle=tx_ph, mode=mode) + if tx_ph not in port_stats: + port_stats[tx_ph] = _fetch_stats(obj, tx_ph, mode, comp_type, 'tx') + tx_stats = port_stats[tx_ph] + #st.debug(tx_stats) + counter_name = get_counter_name(mode,obj.tg_type,comp_type,'tx') + cur_tx_val = int(tx_stats[tx_ph][mode]['tx'][counter_name]) + st.log('Transmit counter_name: {}, counter_val: {}'.format(counter_name,cur_tx_val)) + exp_val += cur_tx_val * ratio + st.log('Total Tx from ports {}: {}'.format(tx_ports,exp_val)) + + for port,obj in zip(rx_ports,rx_obj): + rx_ph = obj.get_port_handle(port) + # rx_stats = obj.tg_traffic_stats(port_handle=rx_ph, mode=mode) + if rx_ph not in port_stats: + port_stats[rx_ph] = _fetch_stats(obj, rx_ph, mode, comp_type, 'rx') + rx_stats = port_stats[rx_ph] + #st.debug(rx_stats) + counter_name = get_counter_name(mode,obj.tg_type,comp_type,'rx') + real_rx_val = int(rx_stats[rx_ph][mode]['rx'][counter_name]) + st.log('Receive counter_name: {}, counter_val: {}'.format(counter_name,real_rx_val)) + + st.log('Total Rx on ports {}: {}'.format(rx_ports, real_rx_val)) + + diff = (abs(exp_val - real_rx_val) * 100.0) / exp_val if exp_val > 0 else abs(real_rx_val * 100.0 / cur_tx_val) + if diff <= tolerance: + st.log('Traffic Validation: {}, {}'.format('Success',cmsg)) + st.log('Got expected values; Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) + ret_all.append(True) + break + elif loop != retry_count: + if not retry and comp_type == 'packet_rate' and diff > (tolerance + 5): + msg = 'The traffic difference is not between {} and {}. 
Skipping the retry'.format(tolerance, tolerance + 5) + st.debug(msg) + else: + st.log('Traffic Verification: {}, {}'.format('Failure', cmsg)) + st.log('Expected: {}, Actual: {}, diff%: {}'.format(exp_val, real_rx_val, diff)) + continue return_value = False ret_all.append(False) - st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}'.format('Failure',tr_pair,tx_ports,rx_ports)) + st.log('Traffic Validation: {}, {}'.format('Failure', cmsg)) st.log('Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) + break return return_value if return_all==0 else (return_value, ret_all) @@ -287,58 +293,72 @@ def _verify_streamlevel_stats(tr_details,mode,comp_type,tolerance_factor,delay_f ratio = ratio * len(stream) tx_ph = txObj.get_port_handle(txPort) for strelem,ratelem in zip(stream,ratio): - exp_val = 0 - if rx_obj.tg_type == 'stc': - tx_counter_name = get_counter_name(mode,rx_obj.tg_type,comp_type,'tx') - rx_counter_name = get_counter_name(mode,rx_obj.tg_type,comp_type,'rx') - #rx_stats = rx_obj.tg_traffic_stats(port_handle=rx_ph,mode='streams',streams=strelem) - if tx_ph not in stream_stats: - stream_stats[tx_ph] = _fetch_stats(txObj, tx_ph, 'streams', comp_type, 'tx', stream_elem=strelem) - rx_stats = stream_stats[tx_ph] - exp_val = int(rx_stats[tx_ph]['stream'][strelem]['tx'][tx_counter_name]) - tx_val=exp_val - exp_val = int(exp_val * float(ratelem)) - real_rx_val = int(rx_stats[tx_ph]['stream'][strelem]['rx'][rx_counter_name]) - elif rx_obj.tg_type in ['ixia', 'scapy']: - tx_counter_name = get_counter_name(mode,rx_obj.tg_type,comp_type,'tx') - rx_counter_name = get_counter_name(mode,rx_obj.tg_type,comp_type,'rx') - #rx_stats = rx_obj.tg_traffic_stats(port_handle=rx_ph, mode='traffic_item') - if rx_ph not in stream_stats: - stream_stats[rx_ph] = _fetch_stats(rx_obj, rx_ph, 'traffic_item', comp_type, 'tx', stream_elem=strelem) - rx_stats = stream_stats[rx_ph] - - #Following check is to avoid KeyError traffic_item. 
Reason is traffic was not started. - #Ixia team is looking into why traffic was not started - might be setup issue - if rx_stats['status'] != '1': - st.error('Could not get traffic_stats from the TGEN, Please check if traffic was started') - return False - try: - exp_val = float(rx_stats['traffic_item'][strelem]['tx'][tx_counter_name]) - except Exception: - st.error('Could not get tx counter from the TGEN, Please check if traffic was started') - return False - - tx_val=exp_val - exp_val = int(exp_val * float(ratelem)) - - try: - real_rx_val = float(rx_stats['traffic_item'][strelem]['rx'][rx_counter_name]) - except Exception: - st.error('Could not get rx counter from the TGEN, Please check if traffic was started') - return False - - st.log('RX counter {} = {}'.format(rx_counter_name,real_rx_val)) - if tx_val > 0: - diff = (abs(exp_val - real_rx_val) * 100.0) / exp_val if exp_val > 0 else abs(real_rx_val * 100.0 / tx_val) - if diff <= tolerance: - st.log('Traffic Validation: {}, {} streamid: {}'.format('Success',cmsg, strelem)) - st.log('Got expected values; Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) - ret_all.append(True) - continue - return_value = False - ret_all.append(False) - st.log('Traffic Validation: {}, {} streamid: {}'.format('Failure',cmsg, strelem)) - st.log('Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) + retry_count = retry + 1 if not retry and comp_type == 'packet_rate' else retry + for loop in range(0, int(retry_count)+1): + if loop > 0: + st.log('The difference is not in the given tolerance. 
So, retrying the stats fetch once again....{}'.format(loop)) + st.wait(2, 'waiting before fetching stats again') + if rx_obj.tg_type == 'stc': stream_stats.pop(tx_ph, '') + if rx_obj.tg_type in ['ixia', 'scapy']: stream_stats.pop(rx_ph, '') + exp_val = 0 + tx_counter_name = get_counter_name(mode, rx_obj.tg_type, comp_type, 'tx') + rx_counter_name = get_counter_name(mode, rx_obj.tg_type, comp_type, 'rx') + if rx_obj.tg_type == 'stc': + #rx_stats = rx_obj.tg_traffic_stats(port_handle=rx_ph,mode='streams',streams=strelem) + if tx_ph not in stream_stats: + stream_stats[tx_ph] = _fetch_stats(txObj, tx_ph, 'streams', comp_type, 'tx', stream_elem=strelem) + rx_stats = stream_stats[tx_ph] + exp_val = int(rx_stats[tx_ph]['stream'][strelem]['tx'][tx_counter_name]) + tx_val=exp_val + exp_val = int(exp_val * float(ratelem)) + real_rx_val = int(rx_stats[tx_ph]['stream'][strelem]['rx'][rx_counter_name]) + elif rx_obj.tg_type in ['ixia', 'scapy']: + #rx_stats = rx_obj.tg_traffic_stats(port_handle=rx_ph, mode='traffic_item') + if rx_ph not in stream_stats: + stream_stats[rx_ph] = _fetch_stats(rx_obj, rx_ph, 'traffic_item', comp_type, 'tx', stream_elem=strelem) + rx_stats = stream_stats[rx_ph] + + #Following check is to avoid KeyError traffic_item. Reason is traffic was not started. 
+ #Ixia team is looking into why traffic was not started - might be setup issue + if rx_stats['status'] != '1': + st.error('Could not get traffic_stats from the TGEN, Please check if traffic was started') + return False + try: + exp_val = float(rx_stats['traffic_item'][strelem]['tx'][tx_counter_name]) + except Exception: + st.error('Could not get tx counter from the TGEN, Please check if traffic was started') + return False + + tx_val=exp_val + exp_val = int(exp_val * float(ratelem)) + + try: + real_rx_val = float(rx_stats['traffic_item'][strelem]['rx'][rx_counter_name]) + except Exception: + st.error('Could not get rx counter from the TGEN, Please check if traffic was started') + return False + + st.log('RX counter {} = {}'.format(rx_counter_name, real_rx_val)) + if tx_val > 0: + diff = (abs(exp_val - real_rx_val) * 100.0) / exp_val if exp_val > 0 else abs(real_rx_val * 100.0 / tx_val) + if diff <= tolerance: + st.log('Traffic Validation: {}, {} streamid: {}'.format('Success',cmsg, strelem)) + st.log('Got expected values; Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) + ret_all.append(True) + break + if loop != retry_count: + if not retry and comp_type == 'packet_rate' and diff > (tolerance + 5): + msg = 'The traffic difference is not between {} and {}. 
Skipping the retry'.format(tolerance, tolerance + 5) + st.debug(msg) + else: + st.log('Traffic Verification: {}, {} streamid: {}'.format('Failure', cmsg, strelem)) + st.log('Expected: {}, Actual: {}, diff%: {}'.format(exp_val, real_rx_val, diff)) + continue + return_value = False + ret_all.append(False) + st.log('Traffic Validation: {}, {} streamid: {}'.format('Failure',cmsg, strelem)) + st.log('Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) + break return return_value if return_all==0 else (return_value, ret_all) def _verify_analyzer_filter_stats(tr_details,mode,comp_type,tolerance_factor,delay_factor,retry,return_all): @@ -413,15 +433,15 @@ st.log('Receive counter_name: {}, counter_val: {}'.format(rx_counter_name,real_rx_val)) diff = (abs(exp_val - real_rx_val) * 100.0) / exp_val if exp_val > 0 else abs(real_rx_val * 100.0 / tx_val) if diff <= tolerance: - st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}\ - streamid: {}, filter param: {}, filter value: {}'.format('Success',tr_pair,tx_ports,rx_ports,strelem,fpelem,fvelem)) + st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}, streamid: {}, filter param: {}, filter value: {}'.format( + 'Success', tr_pair, tx_ports, rx_ports, strelem, fpelem, fvelem)) st.log('Got expected values; Expected: {}, Actual: {}, diff%: {}'.format(exp_val,real_rx_val,diff)) ret_all.append(True) else: return_value = False ret_all.append(False) - st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}\ - streamid: {}, filter param: {}, filter value: {}'.format('Failure',tr_pair,tx_ports,rx_ports,strelem,fpelem,fvelem)) + st.log('Traffic Validation: {}, Pair: {}, Transmit Ports: {}, Receive Ports: {}, streamid: {}, filter param: {}, filter value: {}'.format( + 'Failure', tr_pair, tx_ports, rx_ports, strelem, fpelem, fvelem)) st.log('Expected: {}, Actual: {}, 
diff%: {}'.format(exp_val,real_rx_val,diff)) return return_value if return_all==0 else (return_value, ret_all) @@ -528,7 +548,7 @@ def validate_tgen_traffic(**kwargs): delay_factor = kwargs.get('delay_factor',1) delay_factor = delay_factor if float(delay_factor) >= 1 else 1 tolerance_factor = kwargs.get('tolerance_factor',1) - retry = kwargs.get('retry',1) + retry = kwargs.get('retry',0) comp_type = kwargs.get('comp_type','packet_count') return_all = kwargs.get('return_all',0) mode = kwargs.get('mode').lower() diff --git a/spytest/spytest/tgen_api.py b/spytest/spytest/tgen_api.py index 761095ff45a..a2b512c1dd2 100644 --- a/spytest/spytest/tgen_api.py +++ b/spytest/spytest/tgen_api.py @@ -100,3 +100,19 @@ def traffic_action_control(tg_handler, actions=["reset", "clear_stats"]): tg_handler["tg"].tg_traffic_control(action=action, port_handle=tg_port_handler) return tg_handler +def get_min(v1, v2): + return v1 if v1 < v2 else v2 + +def get_max(v1, v2): + return v1 if v1 > v2 else v2 + +def normalize_pps(value): + from spytest import cutils + if not is_soft_tgen(): return value + max_value = cutils.get_env_int("SPYTEST_SCAPY_MAX_RATE_PPS", 100) + return get_min(value, max_value) + +def normalize_mtu(value): + if not is_soft_tgen(): return value + return get_min(value, 9000) + diff --git a/spytest/templates/bcmcmd_l2_show.tmpl b/spytest/templates/bcmcmd_l2_show.tmpl index 8cae7f067a9..a5b2ea8d472 100644 --- a/spytest/templates/bcmcmd_l2_show.tmpl +++ b/spytest/templates/bcmcmd_l2_show.tmpl @@ -6,7 +6,7 @@ Value port (\S+) Value type (\S+|\S+\s+\S+) Start - ^\s*mac=${mac}\s*vlan=${vlan}\s*GPORT=${gport}\s*modid=${modid}\s*port=${port}\s+${type}\s*$$ -> Record - ^\s*mac=${mac}\s*vlan=${vlan}\s*GPORT=${gport}\s*port=${port}\s+${type}\s*$$ -> Record + ^\s*mac=${mac}\s*vlan=${vlan}\s*GPORT=${gport}\s*modid=${modid}\s*port=${port}\s*(${type}){0,1}\s*$$ -> Record + ^\s*mac=${mac}\s*vlan=${vlan}\s*GPORT=${gport}\s*port=${port}\s*(${type}){0,1}\s*$$ -> Record EOF diff --git 
a/spytest/templates/index b/spytest/templates/index old mode 100644 new mode 100755 index c237e8fa158..26fd163f5b7 --- a/spytest/templates/index +++ b/spytest/templates/index @@ -48,19 +48,22 @@ linux/ip_route_list_dev.tmpl, .*,sonic, /sbin/ip route list dev eth0 linux/ip_route_list_dev.tmpl, .*,sonic, ip route list dev eth0 show_image_list.tmpl, .*, sonic, show image list show_ip_bgp_peer_group.tmpl, .*, sonic, show ip bgp peer-group -show_ip_bgp_peer_group.tmpl, .*, sonic, show ip bgp peer-group .* +show_ip_bgp_peer_group.tmpl, .*, sonic, show bgp all\s+(vrf .*\s+)?peer-group +show_bgp_ipv4v6uni_routes.tmpl, .*, sonic, show bgp ipv(4|6) unicast\s+(vrf .*\s+)?route-map +show_bgp_ipv4v6uni_stats.tmpl, .*, sonic, show bgp ipv(4|6) unicast\s+(vrf .*\s+)?statistics show_ip_bgp_summary.tmpl, .*, sonic, show ip bgp summary show_ip_bgp_summary.tmpl, .*, sonic, show ipv6 bgp summary show_ip_bgp_summary.tmpl, .*, sonic, show bgp ipv4 summary show_ip_bgp_summary.tmpl, .*, sonic, show bgp ipv6 summary show_ip_bgp_summary.tmpl, .*, sonic, show bgp ipv4 unicast summary show_ip_bgp_summary.tmpl, .*, sonic, show bgp ipv6 unicast summary +#show_bgp_ipv4v6uni_prefix.tmpl, .*, sonic, show bgp ipv(4|6) unicast\s*(vrf .*)? 
show_ip_bgp_summary.tmpl, .*, sonic, show bgp vrf .* summary show_ip_bgp_summary.tmpl, .*, sonic, show ip bgp vrf .* summary show_ip_bgp_summary.tmpl, .*, sonic, show bgp .* unicast vrf .* summary show_bgp_neighbor.tmpl, .*, sonic, show ip bgp vrf .* neighbors.* show_bgp_neighbor.tmpl, .*, sonic, show bgp .* unicast neighbors.* -show_bgp_neighbor.tmpl, .*, sonic, show bgp .* unicast vrf .* neighbors.* +show_bgp_neighbor.tmpl, .*, sonic, show bgp ipv(4|6) unicast vrf .* neighbors.* show_bgp_neighbor.tmpl, .*, sonic, show bgp ipv4 neighbor.* show_ip_interfaces.tmpl, .*, sonic, show ip interface show_ip_interfaces.tmpl, .*, sonic, show ipv6 interface @@ -122,6 +125,8 @@ show_lldp_table.tmpl, .*, sonic, show lldp table # Mirror output command for CLICK CLI show_mirror_session.tmpl, .*, sonic, show mirror_session.* show_arp.tmpl, .*, sonic, show arp +show_ntp_server.tmpl, .*, sonic, show ntp server +show_ntp_global.tmpl, .*, sonic, show ntp global show_ntp.tmpl, .*, sonic, show ntp.* show_ntpstat.tmpl, .*, sonic, ntpstat show_ntpstat.tmpl, .*, sonic, sudo cgexec -g l3mdev:mgmt ntpstat @@ -151,9 +156,14 @@ sonic_installer_list.tmpl, .*, sonic, sonic_installer list sonic_acl_show.tmpl, .*, sonic, aclshow.* sudo_sonic_installer_list.tmpl, .*, sonic, sudo sonic_installer list sudo_sonic_installer_list.tmpl, .*, sonic, show image list +show_bgp_as_path_access_list.tmpl, .*, sonic, show bgp as-path-access-list .* +show_bgp_community_list.tmpl, .*, sonic, show bgp community-list .* +show_bgp_ext_community_list.tmpl, .*, sonic, show bgp ext-community-list .* +show_route_map.tmpl, .*, sonic, show route-map .* show_bgp_neighbor.tmpl, .*, sonic, show bgp neighbor.* show_uptime.tmpl, .*, sonic, show uptime show_ecn.tmpl, .*, sonic, show ecn +show_environment.tmpl, .*, sonic, show environment show_reboot-cause.tmpl, .*, sonic, show reboot-cause show_pfc_counters.tmpl, .*, sonic, show pfc counters show_spanning_tree_vlan_interface.tmpl, .*, sonic, show spanning-tree vlan interface @@ 
-306,6 +316,7 @@ pddf_thermalutil_gettemp.tmpl, .*, sonic, sudo pddf_thermalutil gettemp show_mirror_session.tmpl, .*, sonic, show mirror-session.* show_classifier_match_type_copp.tmpl, .*, sonic, show class-map match-type copp show_classmap_match_type_fields.tmpl, .*, sonic, show class-map match-type fields +show_classifier_match_type_copp.tmpl, .*, sonic, show classifier match-type copp show_classifier.tmpl, .*, sonic, show classifier show_classifier.tmpl, .*, sonic, show class-map show_policy_type_copp.tmpl, .*, sonic, show policy type copp @@ -432,9 +443,11 @@ show_frr_config.tmpl,.*,sonic, write terminal ospfd show_portchannel_kernel.tmpl, .*, sonic, ip link show dev .* teamdctl_portchannel_state.tmpl, .*, sonic, teamdctl PortChannel.* state teamdctl_portchannel_config.tmpl, .*, sonic, teamdctl PortChannel.* config dump -show_portchannel_summary.tmpl, .*, sonic, show PortChannel summary.* +show_portchannel_summary.tmpl, .*, sonic, show (p|P)ort(c|C)hannel summary.* show_intf_portchannel.tmpl, .*, sonic, show interface PortChannel(\s*|\s+\d+) show_management_vrf.tmpl, .*, sonic, show mgmt-vrf +show_ip_prefix_list.tmpl, .*, sonic, show ip prefix-list .* +show_ipv6_prefix_list.tmpl, .*, sonic, show ipv6 prefix-list .* show_ip_vrf.tmpl, .*, sonic, show ip vrf$ show_ip_dhcp_relay_brief.tmpl, .*, sonic, show ip dhcp-relay brief.* show_ip_dhcp_relay_brief.tmpl, .*, sonic, show ipv6 dhcp-relay brief.* @@ -449,6 +462,7 @@ show_ip_forward_protocol_config.tmpl, .*, sonic, show ip forward_protocol config show_ip_forward_protocol_config.tmpl, .*, sonic, show ip forward-protocol sonic_snmp_yml.tmpl, .*, sonic, cat /etc/sonic/snmp.yml show_snmp_server_details.tmpl, .*, sonic, show snmp-server.* +show_snmp_counters.tmpl, .*, sonic, show snmp counters.* show_portgroup.tmpl, .*, sonic, show portgroup show_ip_static-anycast-gateway.tmpl, .*, sonic, show ip static-anycast-gateway show_ipv6_static-anycast-gateway.tmpl, .*, sonic, show ipv6 static-anycast-gateway @@ -484,6 +498,8 
@@ show_portchannel_appdb.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 0 hgetall 'LAG_ show_portchannel_statedb.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 6 hgetall 'LAG_TABLE.* show_redis_cli_stp_iccp.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 6 hgetall '_STP_ICCP.* show_redis_cli_stp_iccp.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 6 hgetall 'STP_FASTAGEING_FLUSH.* +show_redis_cli_transceiver_info.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 6 hgetall "TRANSCEIVER_INFO* +show_redis_cli_dom.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 6 hgetall "TRANSCEIVER_DOM_SENSOR* show_redis-cli_keys.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 8 keys ERROR.* show_redis_error_db.tmpl, .*, sonic, redis-cli(?: -p \d+)? -n 8 hgetall .* show_redis_cli_key_search.tmpl, .*, sonic, redis-cli(?: -p \d+)? HGETALL \S+ @@ -516,7 +532,7 @@ show_redis-cli_keys.tmpl, .*, sonic, sonic-db-cli ERROR_DB keys ERROR.* show_redis_error_db.tmpl, .*, sonic, sonic-db-cli ERROR_DB hgetall .* show_redis_cli_key_search.tmpl, .*, sonic, sonic-db-cli HGETALL \S+ show_redis_cli_key_search.tmpl, .*, sonic, sonic-db-cli \w+ hgetall \S+:oid:\S+ -show_redis_cli_ip_db.tmpl, .*, sonic, sonic-db-cli \w+ hgetall \S+ +show_redis_cli_ip_db.tmpl, .*, sonic, sonic-db-cli \w+ hgetall \S+show_ip_access_group show_redis_cli_key_search.tmpl, .*, sonic, sonic-db-cli \w+ keys \S+ show_redis_cli_key_search.tmpl, .*, sonic, sudo sonic-db-cli \w+ keys \S+ show_redis_cli_key_buffer_pool.tmpl, .*, sonic, sonic-db-cli \w+ Hgetall \S+ @@ -529,6 +545,7 @@ show_udld_statistics.tmpl,.*,sonic,show udld statistics show_udld_statistics.tmpl,.*,sonic,show udld statistics interface \w+ show_queue_watermark_multicast.tmpl, .*, sonic, show queue (watermark|persistent-watermark) multicast .* +show_queue_watermark_unicast.tmpl, .*, sonic, show queue (watermark|persistent-watermark) unicast .* show_priority_group_threshold.tmpl, .*, sonic, show priority-group (watermark|persistent-watermark) (shared|headroom) .* show_queue_watermark_unicast.tmpl, .*, sonic, 
show queue (watermark|persistent-watermark) unicast .* show_priority_group_watermark.tmpl, .*, sonic, show priority-group (watermark|persistent-watermark) (shared|headroom) .* @@ -546,6 +563,7 @@ show_queue_watermark_cpu.tmpl, .*, sonic, show queue watermark (cpu|CPU) show_buffer_pool_watermark_percentage.tmpl, .*, sonic, show buffer_pool (watermark|persistent-watermark) (-p|--percentage) ethtool_interface.tmpl, .*, sonic, /sbin/ethtool .* show_ip_access_group.tmpl, .*, sonic, show ip access-group +show_ip_arp_interface_ethernet_summary.tmpl, .*, sonic, show ip arp interface (Ethernet|Eth|PortChannel|Vlan|Loopback|Management|Vxlan) ([0-9/:]+) summary show_ip_arp.tmpl, .*, sonic, show ip arp.* show_ipv6_neighbors.tmpl, .*, sonic, show ipv6 neighbors show_priority_group_threshold.tmpl, .*, sonic, show threshold priority-group (shared|headroom) diff --git a/spytest/templates/show_bgp_community_list.tmpl b/spytest/templates/show_bgp_community_list.tmpl index 0df1867d4d8..8d763394207 100644 --- a/spytest/templates/show_bgp_community_list.tmpl +++ b/spytest/templates/show_bgp_community_list.tmpl @@ -1,39 +1,35 @@ -Value Required CommunityType ((Standard)||(Expanded)) -Value Required LIST_NAME ([\w_]+) -Value List PERMITDENY ((permit)|(deny)) -Value MATCHTYPE ((ANY)|(ALL)) +Value Required,Filldown CommunityType ((Standard)||(Expanded)) +Value Required,Filldown LIST_NAME ([\w_]+) +Value Required,Filldown MATCHTYPE ((ANY)|(ALL)) +Value PERMITDENY ((permit)|(deny)) Value List ATTRIBUTE ((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})) Value List ExpAttribute (.*$$) Start - ^\s*((Standard)|(Expanded)) community list.* -> Continue.Record ^\s*${CommunityType} community list ${LIST_NAME}:\s*match: ${MATCHTYPE}$$ - # FIXME: Is appearance of a prompt in the output a bug? - #Prompt: Record the current row. 
- ^\s*(\-\-sonic.*) -> Next.Record # One attribute or first on a line: - ^\s*${PERMITDENY}\s*${ATTRIBUTE}$$ - ^\s*${ATTRIBUTE}$$ + ^\s*${PERMITDENY}\s*${ATTRIBUTE}$$ -> Record ^\s*${PERMITDENY}\s*${ATTRIBUTE}, -> Continue + ^\s*${ATTRIBUTE}$$ -> Record ^\s*${ATTRIBUTE}, -> Continue # Two attributes or second on a line: - ^\s*(permit|deny)\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}$$ - ^\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}$$ + ^\s*(permit|deny)\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}$$ -> Record + ^\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}$$ -> Record ^\s*(permit|deny)\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}, -> Continue ^\s*((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),${ATTRIBUTE}, -> Continue # Three attributes or third on a line: - ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}$$ - ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}$$ + ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}$$ -> Record + ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}$$ -> Record ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}, -> Continue ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){2}${ATTRIBUTE}, -> Continue # Four attributes or fourth on a line: - ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}$$ - 
^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}$$ + ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}$$ -> Record + ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}$$ -> Record ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}, -> Continue ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){3}${ATTRIBUTE}, -> Continue # Five attributes on a line: - ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){4}${ATTRIBUTE}$$ - ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){4}${ATTRIBUTE}$$ + ^\s*(permit|deny)\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){4}${ATTRIBUTE}$$ -> Record + ^\s*(((no\-export)|(no\-advertise)|(local\-AS)|(local\-as)|(no\-peer)|(\d{1,5}:\d{1,5})),){4}${ATTRIBUTE}$$ -> Record # Expanded (wildcard) attribute - ^\s*${PERMITDENY}\s*${ExpAttribute}$$ - ^\s*${ExpAttribute}$$ + ^\s*${PERMITDENY}\s*${ExpAttribute}$$ -> Record + ^\s*${ExpAttribute}$$ -> Record \ No newline at end of file diff --git a/spytest/templates/show_bgp_ext_community_list.tmpl b/spytest/templates/show_bgp_ext_community_list.tmpl index 123919ff0f8..89199af0696 100644 --- a/spytest/templates/show_bgp_ext_community_list.tmpl +++ b/spytest/templates/show_bgp_ext_community_list.tmpl @@ -1,39 +1,35 @@ -Value Required CommunityType ((Standard)|(Expanded)) -Value Required LIST_NAME ([\w_]+) -Value MATCHTYPE ((ANY)|(ALL)) -Value List PERMITDENY ((permit)|(deny)) +Value Required,Filldown CommunityType ((Standard)|(Expanded)) +Value Required,Filldown LIST_NAME ([\w_]+) +Value Required,Filldown MATCHTYPE ((ANY)|(ALL)) +Value PERMITDENY ((permit)|(deny)) Value List 
ATTRIBUTE (((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}) Value List ExpAttribute (.*$$) Start - ^\s*((Standard)|(Expanded)) extended community list.* -> Continue.Record ^\s*${CommunityType} extended community list ${LIST_NAME}:\s*match: ${MATCHTYPE}$$ - # FIXME: Is appearance of a prompt in the output a bug? - #Prompt: Record the current row. - ^\s*(\-\-sonic.*) -> Next.Record # One attribute or first on a line: - ^\s*${PERMITDENY}\s*${ATTRIBUTE}$$ -> Next - ^\s*${ATTRIBUTE}$$ -> Next + ^\s*${PERMITDENY}\s*${ATTRIBUTE}$$ -> Record ^\s*${PERMITDENY}\s*${ATTRIBUTE}, -> Continue + ^\s*${ATTRIBUTE}$$ -> Record ^\s*${ATTRIBUTE}, -> Continue # Two attributes or second on a line (capture: - ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),${ATTRIBUTE}$$ -> Next - ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),${ATTRIBUTE}$$ -> Next - ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),${ATTRIBUTE}, -> Continue - ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),${ATTRIBUTE}, -> Continue + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*)${ATTRIBUTE}$$ -> Record + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*)${ATTRIBUTE}, -> Continue + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*)*${ATTRIBUTE}$$ -> Record + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*)*${ATTRIBUTE}, -> Continue # Three attributes or third on a line: - ^\s*(permit|deny)\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){2}${ATTRIBUTE}$$ -> Next - ^\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){2}${ATTRIBUTE}$$ -> Next - ^\s*(permit|deny)\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){2}${ATTRIBUTE}, -> Continue - ^\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){2}${ATTRIBUTE}, -> Continue + 
^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){2}${ATTRIBUTE}$$ -> Record + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){2}${ATTRIBUTE}, -> Continue + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){2}${ATTRIBUTE}$$ -> Record + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){2}${ATTRIBUTE}, -> Continue # Four attributes or fourth on a line: - ^\s*(permit|deny)\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){3}${ATTRIBUTE}$$ -> Next - ^\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){3}${ATTRIBUTE}$$ -> Next - ^\s*(permit|deny)\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){3}${ATTRIBUTE}, -> Continue - ^\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){3}${ATTRIBUTE}, -> Continue + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){3}${ATTRIBUTE}$$ -> Record + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){3}${ATTRIBUTE}, -> Continue + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){3}${ATTRIBUTE}$$ -> Record + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){3}${ATTRIBUTE}, -> Continue # Five attributes on a line: - ^\s*(permit|deny)\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){4}${ATTRIBUTE}$$ -> Next - ^\s*((((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5}),){4}${ATTRIBUTE}$$ -> Next - ^\s*${PERMITDENY}\s*${ExpAttribute}$$ - ^\s*${ExpAttribute}$$ - + ^\s*(permit|deny)\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){4}${ATTRIBUTE}$$ -> Record + ^\s*(((rt)|(soo)):(((\d{1,3}\.){3}\d{1,3})|(\d{1,5})):\d{1,5},\s*){4}${ATTRIBUTE}$$ -> Record + # Expanded (wildcard) attribute + ^\s*${PERMITDENY}\s*${ExpAttribute}$$ -> Record + ^\s*${ExpAttribute}$$ -> Record \ No newline at end of file diff --git a/spytest/templates/show_bgp_ipv6.tmpl 
b/spytest/templates/show_bgp_ipv6.tmpl index 6386b45de75..0e5251d7fab 100644 --- a/spytest/templates/show_bgp_ipv6.tmpl +++ b/spytest/templates/show_bgp_ipv6.tmpl @@ -10,6 +10,7 @@ Value STATUS_CODE ([sdhirSR*>=#]*) Value INTERNAL ([ie\?]?) Value Filldown VRF (\S+) Value Filldown AS_NUM (\d+) +Value ORIGIN ([ie\?]?) Start @@ -22,11 +23,11 @@ Start Bgp_table ^\s*${STATUS_CODE}\s+${INTERNAL}\s+${NEXT_HOP}\s*$$ -> Continue ^\s{1,45}${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record - ^\s*${STATUS_CODE}\s{1,16}${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s*$$ -> Continue + ^\s*${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NETWORK}\s+${NEXT_HOP}\s*$$ -> Continue ^\s{1,45}${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record - ^\s*${STATUS_CODE}\s{1,16}${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record + ^\s*${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+\(*${AS_PATH}\)*\s+${ORIGIN}\s*$$ -> Record ^\s*${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NEXT_HOP}\s+${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record - ^\s*${STATUS_CODE}\s+${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s+\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record + ^\s*${STATUS_CODE}\s+${INTERNAL}\s+${NETWORK}\s+${NEXT_HOP}\s+\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record ^\s*${STATUS_CODE}\s+${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s+${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s+${INTERNAL}\s*$$ -> Record ^\s*${STATUS_CODE}\s+${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s+${LOCAL_PREF}\s+${WEIGHT}\s+${INTERNAL}\s*$$ -> Record ^\s*${STATUS_CODE}\s+${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s+${WEIGHT}\s+${AS_PATH}\s+${INTERNAL}\s*$$ -> Record @@ -35,3 +36,4 @@ Bgp_table ^\s*${STATUS_CODE}\s+${NETWORK}\s+${NEXT_HOP}\s+${METRIC}\s+${INTERNAL}\s*$$ -> Record EOF + diff --git a/spytest/templates/show_ip_bgp.tmpl b/spytest/templates/show_ip_bgp.tmpl index c9b8ce99269..f7024c27d78 100644 --- 
a/spytest/templates/show_ip_bgp.tmpl +++ b/spytest/templates/show_ip_bgp.tmpl @@ -9,6 +9,7 @@ Value ROUTER_ID (\S{0,19}) Value VRF_ID (\S+) Value STATUS_CODE ([sdhirSR*>=#]*) Value INTERNAL (i?) +Value ORIGIN ([ie\?]?) Start ^BGP table version is ${VERSION}, local router ID is ${ROUTER_ID}, vrf id ${VRF_ID}\s*$$ -> VTYSH @@ -27,10 +28,10 @@ KLISH ^Origin codes:.*$$ -> Continue Bgp_table - ^${STATUS_CODE}\s{1,16}${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s*$$ -> Continue + ^${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NETWORK}\s+${NEXT_HOP}\s*$$ -> Continue ^\s{20}\s+${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record - ^${STATUS_CODE}\s{1,16}${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s{1,18}${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record - ^${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NEXT_HOP}\s+${METRIC}\s{0,9}${LOCAL_PREF}\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record + ^${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NETWORK}\s+${NEXT_HOP}\s{1,18}${METRIC}\s{1,15}${LOCAL_PREF}\s+${WEIGHT}\s+\(*${AS_PATH}\)*\s+${ORIGIN}\s*$$ -> Record + ^${STATUS_CODE}\s{1,16}${INTERNAL}\s+${NEXT_HOP}\s+${METRIC}\s{1,15}${LOCAL_PREF}\s+${WEIGHT}\s+\(*${AS_PATH}\)*\s+${ORIGIN}\s*$$ -> Record ^${STATUS_CODE}\s+${INTERNAL}${NETWORK}\s+${NEXT_HOP}\s+\s+${WEIGHT}\s+${AS_PATH}\s*$$ -> Record ^\s+${NETWORK}\s+${AS_PATH}\s*$$ -> Record diff --git a/spytest/templates/show_ip_bgp_peer_group.tmpl b/spytest/templates/show_ip_bgp_peer_group.tmpl index 681bf0023b5..07a575c1fe9 100755 --- a/spytest/templates/show_ip_bgp_peer_group.tmpl +++ b/spytest/templates/show_ip_bgp_peer_group.tmpl @@ -1,18 +1,14 @@ Value Required PGNAME (\w+) Value REMOTEASN (\d+) Value List CONFIGAF (IPv\d Unicast) -Value List PGMEMBER ([\:\d\w\.]+) +Value List PGMEMBER ([\:\d\w\.\/]+) Value List PGMEMSTATE ((Active)|(Idle)) - - Start ^\s*BGP peer\-group ${PGNAME}, remote AS ${REMOTEASN} ^\s*BGP peer\-group ${PGNAME} ^\s*Configured address-families: ${CONFIGAF} -> Continue ^.*Configured address-families: IPv\d Unicast; 
${CONFIGAF} ^\s*Peer-group\smembers: -> Next PGparse - PGparse ^\s*$$ -> Next.Record Start - ^\s*${PGMEMBER}\s${PGMEMSTATE} - + ^\s*${PGMEMBER}\s${PGMEMSTATE} \ No newline at end of file diff --git a/spytest/templates/show_ip_route.tmpl b/spytest/templates/show_ip_route.tmpl index e3b791a181b..266a3c7e787 100644 --- a/spytest/templates/show_ip_route.tmpl +++ b/spytest/templates/show_ip_route.tmpl @@ -17,21 +17,26 @@ Start ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*is directly connected,\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*is directly connected,\s*${INTERFACE}\(vrf\s${dest_vrf_name}\),\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*is directly connected,\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record + ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*is directly connected,\s*${INTERFACE}\s*inactive,\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record ^\s*${FIB}\s*via\s*${NEXTHOP},\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE}\s*onlink,\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE}\s*inactive\s*onlink,\s*${DURATION}\s*$$ -> Record + ^\s*${TYPE}${SELECTED}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE}\s*inactive\s*onlink,\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*${NEXTHOP}\s*\(${INTERFACE}\),\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}${NOT_INSTALLED}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record 
^\s*${TYPE}${SELECTED}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP}\s*${INTERFACE},\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}${NOT_INSTALLED}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))*\s*${NH_TYPE},\s*${DURATION}\s*$$ -> Record + ^\s*${TYPE}${SELECTED}${FIB}${NOT_INSTALLED}\s*${IP_ADDRESS}\s*\[${DISTANCE}\/${COST}\]\s*via\s*${NEXTHOP},\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))*\s*${NH_TYPE},\s*${DURATION}\s*$$ -> Record ^\s*Codes:.* -> Clearall - ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*via\s*${NEXTHOP}\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))*\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record - ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*Direct\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))*\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record + ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*via\s*${NEXTHOP}\s*${INTERFACE}\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record + ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*Direct\s*${INTERFACE}\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record + ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*via\s*${NEXTHOP}\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record + ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*Direct\s*${INTERFACE}(\(vrf\s*${dest_vrf_name}\))\s*${DISTANCE}\/${COST}\s*${DURATION}\s* -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*${NEXTHOP}\s*\(${INTERFACE}\)\s*${DISTANCE}\/${COST}\s*${DURATION}\s*$$ -> Record ^\s*${TYPE}${SELECTED}${FIB}\s*${IP_ADDRESS}\s*via\s*${NEXTHOP}\s*${INTERFACE}\s* -> Record ^\s*${FIB}\s*via\s*${NEXTHOP}\s*${INTERFACE}\s*$$ -> Record + ^\s*Direct\s*${INTERFACE}\s*$$ -> Record ^\s*via\s*${NEXTHOP}\s*${INTERFACE}\s*$$ -> Record ^\s*via\s*${NEXTHOP}$$ -> Record - ^$$ - + ^$$ \ No newline at end of file diff --git a/spytest/templates/show_mac_dampening.tmpl b/spytest/templates/show_mac_dampening.tmpl new file mode 100644 index 
00000000000..c8539b4b319 --- /dev/null +++ b/spytest/templates/show_mac_dampening.tmpl @@ -0,0 +1,4 @@ +Value count (\d+) + +Start + ^\s*MAC\s+Move\s+Dampening\s+Threshold\s*:\s*${count}\s*$$ -> Record \ No newline at end of file diff --git a/spytest/templates/show_mac_dampening_disabled_ports.tmpl b/spytest/templates/show_mac_dampening_disabled_ports.tmpl index 41bdd226939..fb707079057 100644 --- a/spytest/templates/show_mac_dampening_disabled_ports.tmpl +++ b/spytest/templates/show_mac_dampening_disabled_ports.tmpl @@ -2,6 +2,5 @@ Value List port_list (\S+) Start ^\s*Ports\s+disabled\s+due\s+to\s+MAC\s+Dampening:\s*$$ - ^\S+@\S+:.*$$ -> Record - ^\s*${port_list}\s*$$ + ^${port_list}$$ diff --git a/spytest/templates/show_ntp_global.tmpl b/spytest/templates/show_ntp_global.tmpl new file mode 100644 index 00000000000..991974f91e8 --- /dev/null +++ b/spytest/templates/show_ntp_global.tmpl @@ -0,0 +1,11 @@ +Value source_intf (\S+) +Value vrf (mgmt|default) + +Start + ^\s*\-+\s*$$ + ^\s*NTP\s*Global\s*Configuration\s*$$ + ^\s*\-+\s*$$ + ^\s*NTP\s*source\-interface\s*\:\s*${source_intf}\s*$$ + ^\s*NTP\s*vrf\s*\:\s*${vrf}\s*$$ -> Record + +EOF diff --git a/spytest/templates/show_ntp_server.tmpl b/spytest/templates/show_ntp_server.tmpl new file mode 100644 index 00000000000..c634a59c746 --- /dev/null +++ b/spytest/templates/show_ntp_server.tmpl @@ -0,0 +1,9 @@ +Value Filldown server ([\d+|\.|\:]+) + +Start + ^\s*\-+\s*$$ + ^\s*NTP\s*Servers\s*$$ + ^\s*\-+\s*$$ + ^\s*${server}\s*$$ -> Record + +EOF \ No newline at end of file diff --git a/spytest/templates/show_route_map.tmpl b/spytest/templates/show_route_map.tmpl index 57b92fe2e11..0a05d90efd6 100644 --- a/spytest/templates/show_route_map.tmpl +++ b/spytest/templates/show_route_map.tmpl @@ -1,8 +1,8 @@ Value Filldown,Required MAP_NAME ([\w\-_]+) Value Required FILTER_TYPE ((permit)|(deny)) Value SEQ_NUM (\d{1,5}) -Value List MATCH_CLAUSE ([\w\-\s_\.]+) -Value List SET_CLAUSE ([\w\-\s_\.\:\,]+) +Value List MATCH_CLAUSE 
([\w\-\s_\.\/]+) +Value List SET_CLAUSE ([\w\-\s_\.\:\,\/]+) Value List CALL_CLAUSE ([\w\-\s_\.]+) # If more actions are defined later, add them by appending "|(next_action)" # after "(current_last_defined_action)". @@ -21,7 +21,7 @@ MatchList ^\s*Set clauses:.* -> Next.NoRecord SetList ^\s*${MATCH_CLAUSE} -SetList +SetList ^\s*Call clauses:.* -> Next.NoRecord CallList ^\s*${SET_CLAUSE} @@ -29,4 +29,4 @@ SetList # also checking for conditions requiring recording of the current row. CallList ^\s*Actions:.* -> Next.NoRecord Start - ^\s*${CALL_CLAUSE} + ^\s*${CALL_CLAUSE} \ No newline at end of file diff --git a/spytest/templates/show_sflow.tmpl b/spytest/templates/show_sflow.tmpl index c5519e3f425..557c42589da 100644 --- a/spytest/templates/show_sflow.tmpl +++ b/spytest/templates/show_sflow.tmpl @@ -1,11 +1,11 @@ -Value State (\S+) -Value Polling_Interval (\S+) -Value Collectors_Cnt (\d+) +Value Filldown State (\S+) +Value Filldown Polling_Interval (\S+) +Value Filldown Collectors_Cnt (\d+) Value Collector_Name (\S+) Value Collector_IP (\S+) Value Collector_Port (\d+) Value Collector_Vrf (\S+) -Value Agent_ID (\S+) +Value Filldown Agent_ID (\S+) Start @@ -27,4 +27,5 @@ KLISH ^\s*configured\s+collectors:\s+${Collectors_Cnt}\s*$$ -> Record ^\s*${Collector_IP}\s+${Collector_Port}\s+${Collector_Vrf}\s*$$ -> Record + EOF diff --git a/spytest/templates/show_snmp_counters.tmpl b/spytest/templates/show_snmp_counters.tmpl new file mode 100644 index 00000000000..a57725c1467 --- /dev/null +++ b/spytest/templates/show_snmp_counters.tmpl @@ -0,0 +1,36 @@ +Value snmp_packets_input (\d+) +Value snmp_version_errors (\d+) +Value unknown_community_name (\d+) +Value illegal_operation (\d+) +Value encoding_errors (\d+) +Value requested_variables (\d+) +Value altered_variables (\d+) +Value get_request_pdus (\d+) +Value get_next_pdus (\d+) +Value set_request_pdus (\d+) +Value snmp_packets_output (\d+) +Value too_big_errors (\d+) +Value no_name_errors (\d+) +Value bad_values_errors (\d+) 
+Value general_errors (\d+) +Value response_pdus (\d+) +Value trap_pdus (\d+) + +Start + ^\s*${snmp_packets_input}\s*SNMP packets input$$ -> Start + ^\s*${snmp_version_errors}\s*Bad SNMP version errors$$ -> Start + ^\s*${unknown_community_name}\s*Unknown community name$$ -> Start + ^\s*${illegal_operation}\s*Illegal operation for community name supplied$$ -> Start + ^\s*${encoding_errors}\s*Encoding errors$$ -> Start + ^\s*${requested_variables}\s*Number of requested variables$$ -> Start + ^\s*${altered_variables}\s*Number of altered variables$$ -> Start + ^\s*${get_request_pdus}\s*Get-request PDUs$$ -> Start + ^\s*${get_next_pdus}\s*Get-next PDUs$$ -> Start + ^\s*${set_request_pdus}\s*Set-request PDUs$$ -> Start + ^\s*${snmp_packets_output}\s*SNMP packets output$$ -> Start + ^\s*${too_big_errors}\s*Too big errors \(Maximum packet size 1500\)$$ -> Start + ^\s*${no_name_errors}\s*No such name errors$$ -> Start + ^\s*${bad_values_errors}\s*Bad values errors$$ -> Start + ^\s*${general_errors}\s*General errors$$ -> Start + ^\s*${response_pdus}\s*Response PDUs$$ -> Start + ^\s*${trap_pdus}\s*Trap PDUs$$ -> Record \ No newline at end of file diff --git a/spytest/templates/show_snmp_system_memory.tmpl b/spytest/templates/show_snmp_system_memory.tmpl deleted file mode 100644 index c37ca6f05f0..00000000000 --- a/spytest/templates/show_snmp_system_memory.tmpl +++ /dev/null @@ -1,8 +0,0 @@ -Value Used (\d+) -value Free (\d+) -Value Total (\d+) - -Start -^UCD-SNMP-MIB::memTotalReal.0\s+=\s+\S+\s+{Total}\s+kB -^UCD-SNMP-MIB::memAvailReal.0\s+=\s+\S+\s+{Used}\s+kB -^UCD-SNMP-MIB::memTotalFree.0\s+=\s+\S+\s+{Free}\s+kB diff --git a/spytest/testbeds/sonic_builds.yaml b/spytest/testbeds/sonic_builds.yaml index 3eb269f8275..19698cdb943 100644 --- a/spytest/testbeds/sonic_builds.yaml +++ b/spytest/testbeds/sonic_builds.yaml @@ -23,15 +23,23 @@ buzznik_plus : current: 
http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/LATEST_BUILD_NGCOV/sonic-broadcom-cloud-advanced.bin #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/GA_BUILD/sonic-broadcom.bin +buzznik_plus_mr : + current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/LATEST_BUILD_NGCOV/sonic-broadcom-cloud-advanced.bin + #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/GA_BUILD/sonic-broadcom.bin + daily : - current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/LATEST_BUILD_NGCOV/sonic-broadcom-cloud-advanced.bin - #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/GA_BUILD/sonic-broadcom.bin + current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/LATEST_BUILD_NGCOV/sonic-broadcom-cloud-advanced.bin + #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/GA_BUILD/sonic-broadcom.bin daily_gcov : - current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/LATEST_BUILD_GCOV/sonic-broadcom-cloud-advanced.bin - #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1/broadcom/GA_BUILD/sonic-broadcom.bin + current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/LATEST_BUILD_GCOV/sonic-broadcom-cloud-advanced.bin + #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/3.1.1/broadcom/GA_BUILD/sonic-broadcom.bin community : current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/community/LATEST_BUILD/sonic-broadcom.bin #restore: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/community/GA_BUILD/sonic-broadcom.bin +upstream : + current: http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/upstream_1.0/broadcom/LATEST_BUILD_NGCOV/sonic-broadcom.bin + #restore: 
http://10.59.132.240:9009/projects/csg_sonic/sonic_builds/daily/upstream_1.0/broadcom/GA_BUILD/sonic-broadcom.bin + diff --git a/spytest/testbeds/sonic_services.yaml b/spytest/testbeds/sonic_services.yaml index 1593d8604b6..3ce422be877 100644 --- a/spytest/testbeds/sonic_services.yaml +++ b/spytest/testbeds/sonic_services.yaml @@ -51,11 +51,11 @@ tacacs: timeout: 5, passkey: lvl7india, tcp_port: 49, auth_type: pap, priority: 8}] radius: - hosts: [{ip: 10.52.140.51, username: test, password: test, + hosts: [{ip: 10.52.145.181, username: test, password: test, timeout: 5, passkey: Lvl7india, udp_port: 1812, auth_type: pap, priority: 8}, {ip: '10.52.1.1', username: root, password: lvl7india, timeout: 5, passkey: lvl7india, udp_port: 1812, auth_type: pap, priority: 2}, - {ip: '10.52.145.181', username: root, password: lvl7india, + {ip: '10.52.140.51', username: root, password: lvl7india, timeout: 5, passkey: lvl7india, udp_port: 1812, auth_type: pap, priority: 3}, {ip: "4001::1", username: root, password: lvl7india, timeout: 5, passkey: lvl7india, udp_port: 1812, auth_type: pap, priority: 4}, @@ -81,7 +81,7 @@ radius: logs_path: {path: '/var/log/auth.log'} cisco_acs_server: {ip: 10.52.136.77, username: qauser, password: qauser123, passkey: lvl7india} radius_conf_files: {path: '/usr/share/sonic/templates/radius_nss.conf.j2', backup_path: '/usr/share/sonic/templates/radius_nss_bkp.conf.j2'} - ro_user: {username: test1, password: password} + ro_user: {username: test1, password: password, password1: test1} syslog: ip: 192.168.1.1 @@ -92,7 +92,7 @@ syslog: ntp: default: [216.239.35.4, 216.239.35.8, 216.239.35.12] - host: [162.246.94.71,192.111.144.114,47.190.36.230] + host: [50.205.244.20, 104.194.8.227, 50.205.244.107] smtp: host: smtphost.broadcom.com @@ -189,5 +189,4 @@ dhcp_server_vxlan: dhcp_server_ip: 172.16.40.210 dhcp_server_ipv6: 2072::210 username: root - password: broadcom - + password: broadcom \ No newline at end of file diff --git 
a/spytest/tests/qos/acl/acl_json_config.py b/spytest/tests/qos/acl/acl_json_config.py index 6412af0be13..5f722ff6ddc 100644 --- a/spytest/tests/qos/acl/acl_json_config.py +++ b/spytest/tests/qos/acl/acl_json_config.py @@ -1172,7 +1172,7 @@ "IP_PROTOCOL":"17" }, "SNMP_SSH|RULE_1":{ - "PRIORITY":"9999", + "PRIORITY":"9997", "PACKET_ACTION":"ACCEPT", "SRC_IP":"", "IP_PROTOCOL":"17" @@ -1191,7 +1191,7 @@ "IP_PROTOCOL":"6" }, "V6_SSH_ONLY|DEFAULT_RULE100":{ - "PRIORITY":"1", + "PRIORITY":"3", "PACKET_ACTION":"DROP", "L4_DST_PORT":"22", "ETHER_TYPE":"0x86dd" @@ -1199,7 +1199,7 @@ "V6_SSH_ONLY|RULE_1":{ "IP_PROTOCOL":"6", "PACKET_ACTION":"ACCEPT", - "PRIORITY":"9999", + "PRIORITY":"9996", "L4_DST_PORT":"22", "SRC_IPV6":"" } diff --git a/spytest/tests/qos/acl/test_acls.py b/spytest/tests/qos/acl/test_acls.py index 18fc751f4ee..080514aade7 100644 --- a/spytest/tests/qos/acl/test_acls.py +++ b/spytest/tests/qos/acl/test_acls.py @@ -3,7 +3,6 @@ import json from spytest import st, tgapi, SpyTestDict -from spytest.utils import random_vlan_list import apis.switching.vlan as vlan_obj import apis.qos.acl as acl_obj @@ -70,7 +69,7 @@ def get_handles(): def apply_module_configuration(): print_log("Applying module configuration") - data.vlan = str(random_vlan_list()[0]) + data.vlan = str(utils.random_vlan_list()[0]) data.dut1_lag_members = [vars.D1D2P1, vars.D1D2P2] data.dut2_lag_members = [vars.D2D1P1, vars.D2D1P2] @@ -123,8 +122,8 @@ def clear_module_configuration(): ]) # delete portchannel utils.exec_all(True, [ - utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D1, data.portChannelName, data.cli_type), - utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D2, data.portChannelName, data.cli_type), + utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D1, data.portChannelName), + utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D2, data.portChannelName), ]) # delete vlan utils.exec_all(True, [ @@ -210,7 +209,7 @@ def verify_packet_count(tx, tx_port, rx, rx_port, table): tg_tx = 
data.tgmap[tx] tg_rx = data.tgmap[rx] exp_ratio = 0 - action = "DROP" + #action = "DROP" attr_list = [] traffic_details = dict() action_list = [] @@ -235,7 +234,7 @@ def verify_packet_count(tx, tx_port, rx, rx_port, table): attr_list.append(attr) action_list.append(action) result_all = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', - comp_type='packet_count', return_all=1, delay_factor=1.2) + comp_type='packet_count', return_all=1, delay_factor=1, retry=1) for result1, action, attr in zip(result_all[1], action_list, attr_list): result = result and result1 if result1: @@ -971,10 +970,9 @@ def test_ft_mac_acl_prioirty_ingress(): st.wait(2) transmit('tg1') #transmit('tg2') - print_log('Check acl priority to verify packets are forwarded when MAC and IPv4 ACLs rules are in "forward" ') + print_log('Check acl priority to verify packets are forwarded when MAC and IPv4 ACLs rules are in "forward"') result1 = verify_packet_count('tg1', vars.T1D1P1, 'tg2', vars.T1D2P1, "L3_IPV4_INGRESS|rule1") - print_log('Check acl priority to verify packets are dropped when MAC acl rule is forward and IPv4 ACL \ - rule is in "drop" ') + print_log('Check acl priority to verify packets are dropped when MAC acl rule is forward and IPv4 ACL rule is in "drop"') result2 = verify_packet_count('tg1', vars.T1D1P1, 'tg2', vars.T1D2P1, "L3_IPV4_INGRESS|rule4") verify_acl_hit_counters(vars.D1, "L2_MAC_INGRESS", acl_type="mac") print_log('Verify ACL hit counters on IPv4)" ') @@ -1000,12 +998,10 @@ def test_ft_mac_acl_prioirty_egress(): acl_obj.apply_acl_config(vars.D1, acl_config) st.wait(2) transmit('tg2') - print_log('Check acl priority to verify packets are dropped when MAC rule is drop and \ - IPv4 ACLs rules are in "forward" ') + print_log('Check acl priority to verify packets are dropped when MAC rule is drop and IPv4 ACLs rules are in "forward"') verify_packet_count('tg2', vars.T1D2P1,'tg1', vars.T1D1P1, "L3_IPV4_EGRESS|rule1") result2 = 
verify_packet_count('tg2', vars.T1D2P1, 'tg1', vars.T1D1P1, "L2_MAC_EGRESS|rule1") - print_log('Check acl priority to verify packets are dropped when MAC rule is drop and \ - IPv4 ACLs rules are in "drop" ') + print_log('Check acl priority to verify packets are dropped when MAC rule is drop and IPv4 ACLs rules are in "drop"') result3 = verify_packet_count('tg2', vars.T1D2P1,'tg1', vars.T1D1P1, "L3_IPV4_EGRESS|rule2") print_log('Verify ACL hit counters on IPv4)" ') result4 = verify_acl_hit_counters(vars.D1,"L2_MAC_EGRESS", acl_type="mac") @@ -1103,7 +1099,7 @@ def test_ft_acl_gnmi(): if not gnmi_set_out: st.report_fail("error_string_found", ' ', ' ') gnmi_get_out = gnmiapi.gnmi_get(vars.D1, xpath) - if "rpc error:" in gnmi_get_out: + if "rpc error:" in str(gnmi_get_out): st.report_fail("error_string_found", 'rpc error:', ' ') transmit('tg1') result1 = verify_packet_count('tg1', vars.T1D1P1, 'tg2', vars.T1D2P1, "L3_IPV4_INGRESS|rule2") diff --git a/spytest/tests/qos/acl/test_acls_l3_forwarding.py b/spytest/tests/qos/acl/test_acls_l3_forwarding.py new file mode 100644 index 00000000000..084e1a4aa08 --- /dev/null +++ b/spytest/tests/qos/acl/test_acls_l3_forwarding.py @@ -0,0 +1,378 @@ +import pprint +import pytest +import json + +from spytest import st, tgapi, SpyTestDict + +import apis.qos.acl as acl_obj +import tests.qos.acl.acl_json_config as acl_data +import tests.qos.acl.acl_utils as acl_utils +import apis.switching.portchannel as pc_obj +import apis.routing.ip as ip_obj +import apis.routing.arp as arp_obj +import apis.system.basic as basic_obj +import utilities.common as utils +from utilities.parallel import ensure_no_exception +pp = pprint.PrettyPrinter(indent=4) + +vars = dict() +data = SpyTestDict() +data.rate_pps = 100 +data.pkts_per_burst = 10 +data.tx_timeout = 2 +data.TBD = 10 +data.portChannelName = "PortChannel111" +data.tg_type = 'ixia' +data.ipv4_address_D1 = "1.1.1.1" +data.ipv4_address_D2 = "2.2.2.1" +data.ipv4_portchannel_D1 = "192.168.1.1" 
+data.ipv4_portchannel_D2 = "192.168.1.2" +data.ipv4_network_D1 = "1.1.1.0/24" +data.ipv4_network_D2 = "2.2.2.0/24" +data.ipv6_address_D1 = "1001::1" +data.ipv6_address_D2 = "2001::1" +data.ipv6_portchannel_D1 = "3001::1" +data.ipv6_portchannel_D2 = "3001::2" +data.ipv6_network_D1 = "1001::0/64" +data.ipv6_network_D2 = "2001::0/64" +data.acl_type = "ipv6" +def print_log(msg): + log_start = "\n================================================================================\n" + log_end = "\n================================================================================" + st.log("{} {} {}".format(log_start, msg, log_end)) + + +def get_handles(): + ''' + ######################## Topology ############################ + + +---------+ +-------+ + | +------------------+ | + TG1 -----| DUT1 | portchannel | DUT2 +----- TG2 + | +------------------+ | + +---------+ +-------+ + + ############################################################## + ''' + global vars + vars = st.ensure_min_topology("D1D2:2", "D1T1:1", "D2T1:1") + tg1, tg_ph_1 = tgapi.get_handle_byname("T1D1P1") + tg2, tg_ph_2 = tgapi.get_handle_byname("T1D2P1") + if tg1.tg_type == 'stc': data.tg_type = 'stc' + tg1.tg_traffic_control(action="reset", port_handle=tg_ph_1) + tg2.tg_traffic_control(action="reset", port_handle=tg_ph_2) + return (tg1, tg2, tg_ph_1, tg_ph_2) + + +def apply_module_configuration(): + print_log("Applying module configuration") + + data.dut1_lag_members = [vars.D1D2P1, vars.D1D2P2] + data.dut2_lag_members = [vars.D2D1P1, vars.D2D1P2] + + # create portchannel + utils.exec_all(True, [ + utils.ExecAllFunc(pc_obj.create_portchannel, vars.D1, data.portChannelName), + utils.ExecAllFunc(pc_obj.create_portchannel, vars.D2, data.portChannelName), + ]) + + # add portchannel members + utils.exec_all(True, [ + utils.ExecAllFunc(pc_obj.add_portchannel_member, vars.D1, data.portChannelName, data.dut1_lag_members), + utils.ExecAllFunc(pc_obj.add_portchannel_member, vars.D2, data.portChannelName, 
data.dut2_lag_members), + ]) + + +def clear_module_configuration(): + print_log("Clearing module configuration") + # delete Ipv4 address + print_log("Delete ip address configuration:") + ip_obj.clear_ip_configuration([vars.D1, vars.D2], family='ipv4') + # delete Ipv6 address + ip_obj.clear_ip_configuration([vars.D1, vars.D2], family='ipv6') + # delete ipv4 static routes + ip_obj.delete_static_route(vars.D1, data.ipv4_portchannel_D2, data.ipv4_network_D2, shell="vtysh", + family="ipv4") + ip_obj.delete_static_route(vars.D2, data.ipv4_portchannel_D1, data.ipv4_network_D1, shell="vtysh", + family="ipv4") + # delete ipv6 static routes + ip_obj.delete_static_route(vars.D1, data.ipv6_portchannel_D2, data.ipv6_network_D2, shell="vtysh", + family="ipv6") + ip_obj.delete_static_route(vars.D2, data.ipv6_portchannel_D1, data.ipv6_network_D1, shell="vtysh", + family="ipv6") + # delete port channel members + print_log("Deleting members from port channel:") + utils.exec_all(True, [ + utils.ExecAllFunc(pc_obj.delete_portchannel_member, vars.D1, data.portChannelName, data.dut1_lag_members), + utils.ExecAllFunc(pc_obj.delete_portchannel_member, vars.D2, data.portChannelName, data.dut2_lag_members), + ]) + # delete port channel + print_log("Deleting port channel configuration:") + utils.exec_all(True, [ + utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D1, data.portChannelName), + utils.ExecAllFunc(pc_obj.delete_portchannel, vars.D2, data.portChannelName), + ]) + # delete acl tables and rules + print_log("Deleting ACLs:") + + [_, exceptions] = utils.exec_all(True, [[acl_obj.acl_delete, vars.D1], [acl_obj.acl_delete, vars.D2]]) + ensure_no_exception(exceptions) + #Clear static arp entries + print_log("Clearing ARP entries") + arp_obj.clear_arp_table(vars.D1) + arp_obj.clear_arp_table(vars.D2) + #Clear static ndp entries + print_log("Clearing NDP entries") + arp_obj.clear_ndp_table(vars.D1) + arp_obj.clear_ndp_table(vars.D2) + +def add_port_to_acl_table(config, table_name, port): + 
config['ACL_TABLE'][table_name]['ports'].append(port) + + +def apply_acl_config(dut, config): + json_config = json.dumps(config) + json.loads(json_config) + st.apply_json2(dut, json_config) + + +def create_streams(tx_tg, rx_tg, rules, match, mac_src, mac_dst): + # use the ACL rule definitions to create match/non-match traffic streams + # instead of hard coding the traffic streams + my_args = { + 'port_handle': data.tgmap[tx_tg]['handle'], 'mode': 'create', 'frame_size': '128', + 'transmit_mode': 'continuous', 'length_mode': 'fixed', 'duration': 1, + 'l2_encap': 'ethernet_ii_vlan', 'rate_pps': data.rate_pps, + 'high_speed_result_analysis': 0, 'mac_src': mac_src, 'mac_dst': mac_dst, + 'port_handle2': data.tgmap[rx_tg]['handle'] + } + + for rule, attributes in rules.items(): + if ("IP_TYPE" in attributes) or ("ETHER_TYPE" in attributes): + continue + if match in rule: + params = {} + tmp = dict(my_args) + for key, value in attributes.items(): + params.update(acl_utils.get_args_l3(key, value, attributes, data.rate_pps, data.tg_type)) + tmp.update(params) + stream = data.tgmap[tx_tg]['tg'].tg_traffic_config(**tmp) + stream_id = stream['stream_id'] + s = {} + s[stream_id] = attributes + s[stream_id]['TABLE'] = rule + data.tgmap[tx_tg]['streams'].update(s) + + +def transmit(tg): + print_log("Transmitting streams") + data.tgmap[tg]['tg'].tg_traffic_control(action='run', stream_handle=list(data.tgmap[tg]['streams'].keys()), + duration=1) + + +def verify_acl_hit_counters(dut, table_name, acl_type="ip"): + result = True + acl_rule_counters = acl_obj.show_acl_counters(dut, acl_type=acl_type, acl_table=table_name) + for rule in acl_rule_counters: + if rule['packetscnt'] == 0: + return False + return result + + +def verify_packet_count(tx, tx_port, rx, rx_port, table): + result = True + tg_tx = data.tgmap[tx] + tg_rx = data.tgmap[rx] + exp_ratio = 0 + attr_list = [] + traffic_details = dict() + action_list = [] + index = 0 + for s_id, attr in tg_tx['streams'].items(): + if table 
in attr['TABLE']: + index = index + 1 + if attr["PACKET_ACTION"] == "FORWARD": + exp_ratio = 1 + action = "FORWARD" + else: + exp_ratio = 0 + action = "DROP" + traffic_details[str(index)] = { + 'tx_ports': [tx_port], + 'tx_obj': [tg_tx["tg"]], + 'exp_ratio': [exp_ratio], + 'rx_ports': [rx_port], + 'rx_obj': [tg_rx["tg"]], + 'stream_list': [[s_id]] + } + attr_list.append(attr) + action_list.append(action) + result_all = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', + comp_type='packet_count', return_all=1, delay_factor=1.2) + for result1, action, attr in zip(result_all[1], action_list, attr_list): + result = result and result1 + if result1: + if action == "FORWARD": + msg = "Traffic successfully forwarded for the rule: {}".format(json.dumps(attr)) + print_log(msg) + else: + msg = "Traffic successfully dropped for the rule: {}".format(json.dumps(attr)) + print_log(msg) + else: + if action == "FORWARD": + msg = "Traffic failed to forward for the rule: {}".format(json.dumps(attr)) + print_log(msg) + else: + msg = "Traffic failed to drop for the rule: {}".format(json.dumps(attr)) + print_log(msg) + return result + + +def initialize_topology(): + print_log("Initializing Topology") + (tg1, tg2, tg_ph_1, tg_ph_2) = get_handles() + data.tgmap = { + "tg1": { + "tg": tg1, + "handle": tg_ph_1, + "streams": {} + }, + "tg2": { + "tg": tg2, + "handle": tg_ph_2, + "streams": {} + } + } + data.vars = vars + + +@pytest.fixture(scope="module", autouse=True) +def acl_v4_module_hooks(request): + # initialize topology + initialize_topology() + + # apply module configuration + apply_module_configuration() + + acl_config1 = acl_data.acl_json_config_v4_l3_traffic + add_port_to_acl_table(acl_config1, 'L3_IPV4_INGRESS', vars.D1T1P1) + + + acl_config2 = acl_data.acl_json_config_v6_l3_traffic + add_port_to_acl_table(acl_config2, 'L3_IPV6_INGRESS', vars.D2T1P1) + + + # creating ACL tables and rules + print_log('Creating ACL tables and rules') + 
utils.exec_all(True, [ + utils.ExecAllFunc(acl_obj.apply_acl_config, vars.D1, acl_config1), + utils.ExecAllFunc(acl_obj.apply_acl_config, vars.D2, acl_config2), + ]) + + # create streams + data.mac1 = basic_obj.get_ifconfig_ether(vars.D1, vars.D1T1P1) + data.mac2 = basic_obj.get_ifconfig_ether(vars.D2, vars.D2T1P1) + print_log('Creating streams') + create_streams("tg1", "tg2", acl_config1['ACL_RULE'], "L3_IPV4_INGRESS", \ + mac_src="00:0a:01:00:00:01", mac_dst=data.mac1) + create_streams("tg1", "tg2", acl_config2['ACL_RULE'], "L3_IPV6_EGRESS", \ + mac_src="00:0a:01:00:00:01", mac_dst="00:0a:01:00:11:02") + create_streams("tg2", "tg1", acl_config2['ACL_RULE'], "L3_IPV6_INGRESS", \ + mac_src="00:0a:01:00:11:02", mac_dst=data.mac2) + create_streams("tg2", "tg1", acl_config1['ACL_RULE'], "L3_IPV4_EGRESS", \ + mac_src="00:0a:01:00:11:02", mac_dst="00:0a:01:00:00:01") + print_log('Completed module configuration') + + st.log("Configuring ipv4 address on ixia connected interfaces and portchannels present on both the DUTs") + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.ipv4_address_D1, 24, family="ipv4", config='add') + ip_obj.config_ip_addr_interface(vars.D2, vars.D2T1P1, data.ipv4_address_D2, 24, family="ipv4", config='add') + ip_obj.config_ip_addr_interface(vars.D1, data.portChannelName, data.ipv4_portchannel_D1, 24, family="ipv4", + config='add') + ip_obj.config_ip_addr_interface(vars.D2, data.portChannelName, data.ipv4_portchannel_D2, 24, family="ipv4", + config='add') + + st.log("Configuring ipv6 address on ixia connected interfaces and portchannels present on both the DUTs") + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.ipv6_address_D1, 64, family="ipv6", config='add') + ip_obj.config_ip_addr_interface(vars.D2, vars.D2T1P1, data.ipv6_address_D2, 64, family="ipv6", config='add') + ip_obj.config_ip_addr_interface(vars.D1, data.portChannelName, data.ipv6_portchannel_D1, 64, family="ipv6", + config='add') + 
ip_obj.config_ip_addr_interface(vars.D2, data.portChannelName, data.ipv6_portchannel_D2, 64, family="ipv6", + config='add') + + st.log("configuring ipv4 static routes on both the DUTs") + ip_obj.create_static_route(vars.D1, data.ipv4_portchannel_D2, data.ipv4_network_D2, shell="vtysh", + family="ipv4") + ip_obj.create_static_route(vars.D2, data.ipv4_portchannel_D1, data.ipv4_network_D1, shell="vtysh", + family="ipv4") + + st.log("configuring ipv6 static routes on both the DUTs") + ip_obj.create_static_route(vars.D1, data.ipv6_portchannel_D2, data.ipv6_network_D2, shell="vtysh", + family="ipv6") + ip_obj.create_static_route(vars.D2, data.ipv6_portchannel_D1, data.ipv6_network_D1, shell="vtysh", + family="ipv6") + + st.log("configuring static arp entries") + arp_obj.add_static_arp(vars.D1, "1.1.1.2", "00:0a:01:00:00:01", vars.D1T1P1) + arp_obj.add_static_arp(vars.D2, "2.2.2.2", "00:0a:01:00:11:02", vars.D2T1P1) + arp_obj.add_static_arp(vars.D2, "2.2.2.4", "00:0a:01:00:11:02", vars.D2T1P1) + arp_obj.add_static_arp(vars.D1, "1.1.1.4", "00:0a:01:00:00:01", vars.D1T1P1) + arp_obj.add_static_arp(vars.D2, "2.2.2.5", "00:0a:01:00:11:02", vars.D2T1P1) + arp_obj.add_static_arp(vars.D1, "1.1.1.5", "00:0a:01:00:00:01", vars.D1T1P1) + arp_obj.add_static_arp(vars.D2, "2.2.2.6", "00:0a:01:00:11:02", vars.D2T1P1) + arp_obj.add_static_arp(vars.D1, "1.1.1.6", "00:0a:01:00:00:01", vars.D1T1P1) + arp_obj.show_arp(vars.D1) + arp_obj.show_arp(vars.D2) + + st.log("configuring static ndp entries") + arp_obj.config_static_ndp(vars.D1, "1001::2", "00:0a:01:00:00:01", vars.D1T1P1, operation="add") + arp_obj.config_static_ndp(vars.D2, "2001::2", "00:0a:01:00:11:02", vars.D2T1P1, operation="add") + arp_obj.show_ndp(vars.D1) + arp_obj.show_ndp(vars.D2) + + yield + clear_module_configuration() + + +def verify_rule_priority(dut, table_name): + acl_rule = "PermitAny6" if "IPV4" in table_name else "PermitAny5" + acl_rule_counters = acl_obj.show_acl_counters(dut, acl_table=table_name, 
acl_rule=acl_rule) + if isinstance(acl_rule_counters, bool): + print_log("Failed to read ACL counters") + return False + if len(acl_rule_counters) == 1: + if (int(acl_rule_counters[0]['packetscnt']) != 0): + print_log("ACL Rule priority test failed") + return False + return True + + +@pytest.mark.acl_test123 +def test_ft_acl_ingress_ipv4_l3_forwarding(): + ''' + IPv4 Ingress ACL is applied on DUT1 port connected to TG Port#1 + Traffic is sent on TG Port #1 + Traffic is recieved at TG Port #2 + ''' + transmit('tg1') + result1 = verify_packet_count('tg1', vars.T1D1P1, 'tg2', vars.T1D2P1, "L3_IPV4_INGRESS") + print_log('Verifing IPv4 Ingress ACL hit counters') + result2 = verify_acl_hit_counters(vars.D1, "L3_IPV4_INGRESS") + result3 = verify_rule_priority(vars.D1, "L3_IPV4_INGRESS") + acl_utils.report_result(result1 and result2 and result3) + + +@pytest.mark.acl_test123 +def test_ft_acl_ingress_ipv6_l3_forwarding(): + ''' + IPv6 Ingress ACL is applied on DUT2 port connected to TG Port #2 + Traffic is sent on TG Port #2 + Traffic is recieved at TG Port #1 + ''' + + transmit('tg2') + result1 = verify_packet_count('tg2', vars.T1D2P1, 'tg1', vars.T1D1P1, "L3_IPV6_INGRESS") + print_log('Verifing IPv6 Ingress ACL hit counters') + result2 = verify_acl_hit_counters(vars.D2, "L3_IPV6_INGRESS", acl_type=data.acl_type) + result3 = verify_rule_priority(vars.D2, "L3_IPV6_INGRESS") + acl_utils.report_result(result1 and result2 and result3) + diff --git a/spytest/tests/qos/qos_map.py b/spytest/tests/qos/qos_map.py new file mode 100644 index 00000000000..8f3c72e8d7c --- /dev/null +++ b/spytest/tests/qos/qos_map.py @@ -0,0 +1,392 @@ +import re +import json + +from spytest import st, tgapi + +import apis.qos.qos as qosapi +import apis.system.port as port +from apis.system import basic +import apis.common.asic as asicapi +import apis.system.interface as intf + +from utilities.common import filter_and_select + +obj_name = "AZURE" +port_qos_map = "PORT_QOS_MAP" +tc_queue_map = 
"TC_TO_QUEUE_MAP" +dscp_tc_map = "DSCP_TO_TC_MAP" + +queue_list = ['2','5'] +dscp_queue_list1 = ['UC1','UC5'] +dscp_queue_list2 = ['UC2','UC4'] +dscp_queue_list3 = ['UC2','UC5'] +dscp_queue_list4 = ['UC5','UC2'] +mqueue_list = ['MC12','MC15'] +strict_pri_queue = ['UC6','UC7'] +strict_dwrr_queue = ['UC4','UC5'] +dwrr_queue = ['UC3','UC4'] +dscp_list = ['20','46'] +smac_list = ['00.00.00.00.00.01','00.00.00.00.00.03'] +dmac_list = ['00.00.00.00.00.02','00.00.00.00.00.04'] + +rate_list = ['100','500'] +exp_list1 = ['500','2500'] +exp_list2 = ['2500','500'] +pkts_sec = 100 +rate_percent = 100 +sp_ratio_1,sp_ratio_2 = 0.01, 1 + +vlan_id = 10 +dut_ip_addr_1 = "10.10.10.1" +dut_ip_addr_2 = "20.20.20.1" +tg_ip_addr_1 = "10.10.10.2" +tg_ip_addr_2 = "20.20.20.2" +tg_ip_addr_3 = "10.10.10.3" +mask = 24 + +tc_to_queue_map_1 = {"0" : "0", "1" : "1", "2" : "2", "3" : "3", + "4" : "4", "5" : "5", "6" : "6", "7" : "7"} +tc_to_queue_map_2 = {"0" : "0", "1" : "1", "2" : "5", "3" : "3", + "4" : "4", "5" : "2", "6" : "6", "7" : "7"} +#tc1,qval1 = ["0","1","2","3","4","5","6","7"],["0","1","2","3","4","5","6","7"] +tc1,qval1 = ["2","5"],["2","5"] +tc2,qval2 = ["2","5"],["5","2"] + + +dscp_to_tc_map_1 = {"0-2,6,7,9-45,47,49-63":"1", "3":"3", "4":"4", "5":"2", "8":"0", "46":"5", "48":"6"} +dscp_to_tc_map_2 = {"0-2,6-7,9-19,21-45,47,49,51-63":"1", "3":"3", "4,46":"4", "5,20,50":"2", "8":"0", "48":"6"} +dscp_val_2,tc_val_2 = ["20","46"],["2","4"] +dscp_val_3,tc_val_3 = ["20","46"],["1","5"] + +dscp_bind_port = {"dscp_to_tc_map" : "[DSCP_TO_TC_MAP|AZURE]"} + +sched_0, sched_1, sched_2, sched_3 = "scheduler.0", "scheduler.1", "scheduler.2", "scheduler.3" +sched_strict = {"type" : "STRICT", "weight": "25"} +sched_dwrr_1 = {"type" : "DWRR", "weight": "50"} +sched_dwrr_2 = {"type" : "DWRR", "weight": "20"} +sched_dwrr_3 = {"type" : "DWRR", "weight": "10"} +sched_bind_0 = {"scheduler" : "[SCHEDULER|scheduler.0]"} +sched_bind_1 = {"scheduler" : "[SCHEDULER|scheduler.1]"} +sched_bind_2 = 
{"scheduler" : "[SCHEDULER|scheduler.2]"} +sched_bind_3 = {"scheduler" : "[SCHEDULER|scheduler.3]"} +pfc_bind = {"pfc_enable" : "2"} +wred_obj = "AZURE_LOSSLESS" +wred_profile = "WRED_PROFILE" +wred_input = { + "wred_green_enable" : "true", + "wred_yellow_enable" : "true", + "wred_red_enable" : "true", + "ecn" : "ecn_all", + "green_max_threshold" : "200000", + "green_min_threshold" : "100000", + "yellow_max_threshold" : "2097152", + "yellow_min_threshold" : "1048576", + "red_max_threshold" : "2097152", + "red_min_threshold" : "1048576", + "green_drop_probability" : "100", + "yellow_drop_probability": "5", + "red_drop_probability" : "5" + } +wred_bind = {"wred_profile": "[" + wred_profile + "|" + wred_obj + "]"} + + +def create_glob_vars(): + global vars, tg, tg_ph_1, tg_ph_2, tg_ph_3, tg_ph_4 + global d1_p1, d1_p2, d2_p1, d2_p2, d1_d2_p1, d2_d1_p1 + vars = st.ensure_min_topology("D1T1:2","D2T1:2","D1D2:1") + + tg1, tg_ph_1 = tgapi.get_handle_byname("T1D1P1") + tg2, tg_ph_2 = tgapi.get_handle_byname("T1D1P2") + tg3, tg_ph_3 = tgapi.get_handle_byname("T1D2P1") + tg4, tg_ph_4 = tgapi.get_handle_byname("T1D2P2") + tg = tg1; st.unused(tg2, tg3, tg4) + d1_p1, d1_p2 = vars.D1T1P1, vars.D1T1P2 + d2_p1, d2_p2 = vars.D2T1P1, vars.D2T1P2 + d1_d2_p1, d2_d1_p1 = vars.D1D2P1, vars.D2D1P1 + +def create_stream(handle,type,vlan='10',pcp='1',rate='100',dscp='1', + src_mac='00.00.00.00.00.01',dst_mac='00.00.00.00.00.02', + src_ip=tg_ip_addr_1,dst_ip=tg_ip_addr_2): + + if type=='l2': + tg.tg_traffic_config(mac_src = src_mac, mac_dst=dst_mac, rate_pps=rate, mode='create', + port_handle=handle,l2_encap='ethernet_ii_vlan', vlan_id=vlan, vlan_user_priority= pcp, + duration='5', transmit_mode='continuous') + elif type=='l3': + d1_tg_p1_mac = basic.get_ifconfig(vars.D1, d1_p1)[0]['mac'] + tg.tg_traffic_config(port_handle=handle, mac_src = src_mac, mac_dst=d1_tg_p1_mac, + rate_pps=rate, mode='create', l2_encap='ethernet_ii', duration='5',ip_src_addr= src_ip, + ip_dst_addr=dst_ip, 
l3_protocol= 'ipv4', ip_dscp=dscp,l3_length='512', + mac_discovery_gw=dut_ip_addr_1, transmit_mode='continuous') + elif type == 'copp_arp': + tg.tg_traffic_config(port_handle=handle, mac_src=src_mac, mac_dst="ff:ff:ff:ff:ff:ff", + rate_pps=rate,mode='create',l2_encap='ethernet_ii', transmit_mode='continuous', + l3_protocol= 'arp',arp_src_hw_addr=src_mac,arp_dst_hw_addr="00:00:00:00:00:00", + arp_operation='arpRequest',ip_src_addr=tg_ip_addr_1,ip_dst_addr=tg_ip_addr_2) + elif type == 'wred': + tg.tg_traffic_config(mac_src=src_mac, mac_dst=dst_mac, rate_percent=rate_percent, + mode='create', port_handle=handle, l2_encap='ethernet_ii_vlan', vlan_id=vlan, + vlan_user_priority=pcp, transmit_mode='continuous') + else: + tg.tg_traffic_config(mac_src = src_mac, mac_dst=dst_mac, rate_percent=rate_percent, + mode='create', port_handle=handle,l2_encap='ethernet_ii_vlan', vlan_id=vlan, + vlan_user_priority=pcp, transmit_mode='continuous',duration='5') + + +def verify_traffic(field,type='l2'): + global sp_ratio_1, sp_ratio_2 + + if type == 'l2' or type == 'l3': + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg], + 'exp_ratio': [1], + 'rx_ports': [vars.T1D1P2], + 'rx_obj': [tg], + }, + } + else: + if type == 'dwrr_scheduling': + sp_ratio_1, sp_ratio_2 = 0.3, 0.7 + elif type == 'sp_dwrr_scheduling': + sp_ratio_1, sp_ratio_2 = 0.008, 1 + else: + sp_ratio_1, sp_ratio_2 = 0.015, 1 + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg], + 'exp_ratio': [sp_ratio_1], + 'rx_ports': [vars.T1D2P1], + 'rx_obj': [tg], + 'tolerance_factor' : '3' + }, + '2': { + 'tx_ports': [vars.T1D1P2], + 'tx_obj': [tg], + 'exp_ratio': [sp_ratio_2], + 'rx_ports': [vars.T1D2P2], + 'rx_obj': [tg], + 'tolerance_factor': '2' + }, + } + + return tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='aggregate', comp_type=field) + + +def verify_queue_traffic_and_counter(field, qname_list, rate_list, val_list, type='l2'): + result1,result2 = True, True + 
+ qosapi.clear_qos_queue_counters(vars.D1) + if type == 'l2' or type == 'l3': + tg.tg_traffic_control(action='run', port_handle=[tg_ph_1, tg_ph_2], duration='5') + elif type == 'wred': + tg.tg_traffic_control(action='run', port_handle=[tg_ph_1,tg_ph_2,tg_ph_3,tg_ph_4]) + else: + tg.tg_traffic_control(action='run', port_handle=[tg_ph_1, tg_ph_2, tg_ph_3, tg_ph_4], + duration='5') + st.wait(5) + + if not verify_traffic(field,type): + st.log('traffic verification failed') + result1 = False + else: + st.log('traffic verification passed') + + if type == 'l2' or type == 'l3': + for queue,val,tol in zip(qname_list,val_list,rate_list): + if not qosapi.verify_qos_queue_counters(dut=vars.D1, port=vars.D1T1P2, queue_name=queue, + param_list=['pkts_count'], val_list=[val], tol_list=[tol]): + st.log('qos queue counter verification failed') + result2 = False + else: + st.log('qos queue counter verification passed') + elif type == 'wred': + result2 = verify_wred_drop(queue=qname_list, port=d1_d2_p1) + else: + result2 = verify_queue_drop(queue_list=qname_list, port=d1_d2_p1) + + return bool(result1 and result2) + +def verify_wred_drop(port,queue): + success = True + asicapi.dump_counters(vars.D1) + return success + +def verify_queue_drop(queue_list,port): + global pkts_sec, sp_ratio_1 + success = True + + cli_out = intf.show_queue_counters(vars.D1, port) + fil_out1 = filter_and_select(cli_out, ['pkts_drop'], {"txq": queue_list[0]}) + fil_out2 = filter_and_select(cli_out, ['pkts_drop'], {"txq": queue_list[1]}) + if not fil_out1: + st.error('queue name: {} not found in output: {}'.format(queue_list[0], cli_out)) + return False + else: + fil_out1 = fil_out1[0] + + if not fil_out2: + st.error('queue name: {} not found in output: {}'.format(queue_list[1], cli_out)) + return False + else: + fil_out2 = fil_out2[0] + + try: + fil_out1['pkts_drop'] = re.sub(",", "", fil_out1['pkts_drop']) + fil_out2['pkts_drop'] = re.sub(",", "", fil_out2['pkts_drop']) + q1_drop, q2_drop = 
int(fil_out1['pkts_drop']), int(fil_out2['pkts_drop']) + except ValueError: + st.error('cannot get integer value from obtained ' + 'string: {} or {}'.format(fil_out1['pkts_drop'],fil_out2['pkts_drop'])) + return False + + if q1_drop > q2_drop and q1_drop > (pkts_sec * sp_ratio_1): + st.log('queue {} drop {} is more than queue {} drop {} as expected'.format(queue_list[0], + q1_drop, queue_list[1], q2_drop)) + else: + st.error('queue {} drop {} is not more than queue {} drop {}'.format(queue_list[0], + q1_drop, queue_list[1], q2_drop)) + success = False + return success + + +def clear_tgen_stats(flag,type='l2'): + if flag=='start': + create_glob_vars() + action_list = ['reset'] + else: + #send flag option as 'end' + action_list = ['stop','reset'] + + for action in action_list: + if type == 'scheduling': + tg.tg_traffic_control(action=action, port_handle=[tg_ph_1, tg_ph_2, + tg_ph_3, tg_ph_4]) + else: + tg.tg_traffic_control(action=action, port_handle=[tg_ph_1,tg_ph_2]) + +def check_port_speed(dut,in_port1,in_port2,out_port): + global rate_percent, sp_ratio_1, sp_ratio_2, pkts_sec + + in_speed1 = port.get_status(dut, in_port1)[0]['speed'] + in_speed1 = get_speed(in_speed1) + in_speed2 = port.get_status(dut, in_port2)[0]['speed'] + in_speed2 = get_speed(in_speed2) + if in_speed1 != in_speed2: + st.error('ingress port1 {} speed {} and port2 {} speed {} are not same,' + 'skip the testcase'.format(in_port1,in_speed1,in_port2,in_speed2)) + return False + + eg_speed = port.get_status(dut, out_port)[0]['speed'] + eg_speed = get_speed(eg_speed) + + if in_speed1 >= eg_speed and in_speed1 / eg_speed == 1: + rate_percent = 100 + elif in_speed1 >= eg_speed and in_speed1 / eg_speed == 2: + rate_percent = 40 + elif in_speed1 >= eg_speed and in_speed1 / eg_speed ==4: + rate_percent = 20 + elif in_speed1 >= eg_speed and in_speed1 / eg_speed == 10: + rate_percent = 10 + + + pkts_sec = in_speed1 / 1024 + in_speed = in_speed1 + in_speed2 + if in_speed / eg_speed > 1: + st.log("egress 
speed: {} less than ingress speed: {}, congestion criteria met" + .format(eg_speed, in_speed)) + return True + else: + st.log("egress speed: {} higher than ingress speed: {}, congestion criteria not met" + .format(eg_speed, in_speed)) + return False + +def get_speed(speed): + if "G" in speed: + return int(re.sub('G', '000000', speed)) + +def create_copp_json(dut, block_name, dict_input): + + temp_data = { block_name : dict_input, "OP" : "SET"} + temp_data = [temp_data] + final_data = json.dumps(temp_data) + return st.apply_json(dut, final_data) + + +def verify_counter_cpu_asic_bcm(dut,queue,value,tol): + queue_mc = queue + nooftimes = 3 + queue = 'PERQ_PKT(' + queue + ').cpu0' + queue_mc = 'MC_PERQ_PKT(' + queue_mc + ').cpu0' + for itercountvar in range(nooftimes): + if itercountvar != 0: + st.wait(5) + cli_out = asicapi.get_counters(dut) + fil_out = filter_and_select(cli_out, ["time"], {"key": queue}) + if not fil_out: + fil_out = filter_and_select(cli_out, ["time"], {"key": queue_mc}) + if not fil_out: + st.error('queue: {} not found in output: {}'.format(queue, cli_out)) + if itercountvar < (nooftimes - 1): + continue + return False + else: + if not fil_out[0]['time']: + st.error('queue: {} value is null in the output: {}'.format(queue, fil_out)) + if itercountvar < (nooftimes - 1): + asicapi.clear_counters(dut) + continue + return False + fil_out = fil_out[0] + + if not fil_out['time']: + st.error('queue: {} value is null in the output: {}'.format(queue, cli_out)) + if itercountvar < (nooftimes - 1): + continue + return False + + fil_out['time'] = re.sub(r'|'.join((',', '/s')), "", fil_out['time']) + ob_value = int(fil_out['time']) + start_value = int(value) - int(tol) + end_value = int(value) + int(tol) + if ob_value >= start_value and ob_value <= end_value: + st.log('obtained value {} for queue: {} is in the range b/w ' + '{} and {}'.format(ob_value,queue,start_value,end_value)) + return True + else: + st.error('obtained value {} for queue: {} is NOT in the 
range b/w ' + '{} and {}'.format(ob_value, queue, start_value, end_value)) + if itercountvar < (nooftimes - 1): + asicapi.clear_counters(dut) + continue + return False + + +def verify_copp_arp_asic_bcm(dut,queue,value,tol,rate='100'): + create_stream(type='copp_arp',handle=tg_ph_1,rate=rate) + st.log("send ARP request and verify cpu counter") + tg.tg_traffic_control(action='run', port_handle=[tg_ph_1]) + st.wait(5) + return verify_counter_cpu_asic_bcm(dut,queue,value,tol) + + +def verify_pfc_counters(dut,port_mode,port,queue): + st.log('verify pfc counters') + return True + + +def get_counter_cpu(dut,queue,value='diff'): + cli_out = asicapi.get_counters(dut) + queue = 'PERQ_PKT(' + queue + ').cpu0' + fil_out = filter_and_select(cli_out, [value], {"key": queue}) + if not fil_out: + st.error('queue: {} not found in output: {}'.format(queue, cli_out)) + return False + else: + fil_out = fil_out[0] + + fil_out[value] = re.sub(r"\+", "", fil_out[value]) + return fil_out[value] + diff --git a/spytest/tests/qos/qos_shaper_json_config.py b/spytest/tests/qos/qos_shaper_json_config.py new file mode 100644 index 00000000000..298215a4f34 --- /dev/null +++ b/spytest/tests/qos/qos_shaper_json_config.py @@ -0,0 +1,68 @@ +def init_vars(vars, port_speed): + shaping_data = {} + if str(port_speed) == '1000': + pir_1G = int((100 * 1000 * 1000)//8) #pir_100M + pir_1_18G = int((118 * 1000 * 1000)//8) #pir_118M + pir_2G = int((200 * 1000 * 1000)//8) #pir_200M + pir_3G = int((300 * 1000 * 1000)//8) #pir_300M + pir_4G = int((400 * 1000 * 1000)//8) #pir_400M + pir_5G = int((500 * 1000 * 1000)//8) #pir_500M + pir_6G = int((600 * 1000 * 1000)//8) #pir_600M + pir_7G = int((700 * 1000 * 1000)//8) #pir_700M + pir_8G = int((800 * 1000 * 1000)//8) #pir_800M + pir_8_8G = int((880 * 1000 * 1000)//8) #pir_880M + pir_9G = int((900 * 1000 * 1000)//8) #pir_900M + pir_10G = int((1* 1000 * 1000 * 1000)//8) #pir_1000M + else: + pir_1G = int((1 * 1000 * 1000 * 1000)//8) + pir_1_18G = int((1.18 * 1000 * 1000 
* 1000)//8) + pir_2G = int((2 * 1000 * 1000 * 1000)//8) + pir_3G = int((3 * 1000 * 1000 * 1000)//8) + pir_4G = int((4 * 1000 * 1000 * 1000)//8) + pir_5G = int((5 * 1000 * 1000 * 1000)//8) + pir_6G = int((6 * 1000 * 1000 * 1000)//8) + pir_7G = int((7 * 1000 * 1000 * 1000)//8) + pir_8G = int((8 * 1000 * 1000 * 1000)//8) + pir_8_8G = int((8.8 * 1000 * 1000 * 1000)//8) + pir_9G = int((9 * 1000 * 1000 * 1000)//8) + pir_10G = int((10 * 1000 * 1000 * 1000)//8) + + + port_shaper_json_config = {'port': vars.D1T1P3, 'pir': pir_8G, 'meter_type': 'bytes', 'policy_name': 'port_qos_shaper'} + port_shaper_json_config_10G = {'port': vars.D1T1P3, 'pir': pir_10G, 'meter_type': 'bytes', 'policy_name': 'port_qos_shaper'} + port_shaper_json_config_1G = {'port': vars.D1T1P2, 'pir': pir_1G, 'meter_type': 'bytes', 'policy_name': 'port_qos_shaper'} + queue_shaper_json_config_q0102 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 1, 'pir': pir_8G, 'meter_type': 'bytes'}, {'queue': 2, 'pir': pir_2G, 'meter_type': 'bytes'}]} + queue_shaper_min_not_met_json_config = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 1, 'cir': pir_8_8G, 'pir': pir_10G, 'meter_type': 'bytes'}, {'queue': 2, 'cir': pir_1_18G, 'pir': pir_10G, 'meter_type': 'bytes'}]} + queue_scheduler_json_config_q001 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'scheduler_data': [{'queue': 1, 'weight': '1', 'type': 'WRR'}, {'queue': 2, 'weight': '1', 'type': 'WRR'}]} + queue_shaper_json_config_q0 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 0, 'pir': pir_1G, 'meter_type': 'bytes'}]} + queue_shaper_json_config_q0102_1 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 1, 'pir': pir_2G, 'meter_type': 'bytes'}, {'queue': 2, 'pir': pir_8G, 'meter_type': 'bytes'}]} + port_queue_shaper_json_config1 = {'port': vars.D1T1P3, 'pir': pir_7G, 'meter_type': 'bytes', 'policy_name': 
'port_qos_shaper'} + port_queue_shaper_json_config2 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 1, 'pir': pir_8G, 'meter_type': 'bytes'}, {'queue': 2, 'pir': pir_2G, 'meter_type': 'bytes'}]} + queue_sched_shaper_json_config = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'shaper_data': [{'queue': 1, 'pir': pir_9G, 'meter_type': 'bytes'}, {'queue': 2, 'pir': pir_9G, 'meter_type': 'bytes'}]} + queue_scheduler_json_config_q0102 = {'port': vars.D1T1P3, 'policy_name': 'port_qos_shaper', 'scheduler_data': [{'queue': 1, 'weight': '20', 'type': 'WRR'}, {'queue': 2, 'weight': '80', 'type': 'WRR'}]} + + shaping_data['pir_1G'] = pir_1G + shaping_data['pir_2G'] = pir_2G + shaping_data['pir_3G'] = pir_3G + shaping_data['pir_4G'] = pir_4G + shaping_data['pir_5G'] = pir_5G + shaping_data['pir_6G'] = pir_6G + shaping_data['pir_7G'] = pir_7G + shaping_data['pir_8G'] = pir_8G + shaping_data['pir_8_8G'] = pir_8_8G + shaping_data['pir_9G'] = pir_9G + shaping_data['pir_10G'] = pir_10G + shaping_data['port_shaper_json_config'] = port_shaper_json_config + shaping_data['port_shaper_json_config_10G'] = port_shaper_json_config_10G + shaping_data['port_shaper_json_config_1G'] = port_shaper_json_config_1G + shaping_data['queue_shaper_json_config_q0102'] = queue_shaper_json_config_q0102 + shaping_data['queue_shaper_min_not_met_json_config'] = queue_shaper_min_not_met_json_config + shaping_data['queue_scheduler_json_config_q001'] = queue_scheduler_json_config_q001 + shaping_data['queue_shaper_json_config_q0'] = queue_shaper_json_config_q0 + shaping_data['queue_shaper_json_config_q0102_1'] = queue_shaper_json_config_q0102_1 + shaping_data['port_queue_shaper_json_config1'] = port_queue_shaper_json_config1 + shaping_data['port_queue_shaper_json_config2'] = port_queue_shaper_json_config2 + shaping_data['queue_sched_shaper_json_config'] = queue_sched_shaper_json_config + shaping_data['queue_scheduler_json_config_q0102'] = 
queue_scheduler_json_config_q0102 + + return shaping_data diff --git a/spytest/tests/qos/test_cos.py b/spytest/tests/qos/test_cos.py new file mode 100644 index 00000000000..bce24dca572 --- /dev/null +++ b/spytest/tests/qos/test_cos.py @@ -0,0 +1,353 @@ +# As of now, we are using bcm cmds to check the CPU queue counters. +# We can not push scripts with bcm cmds to community. +# So, we logged a DUT defect(SONIC-6136) to provide official CLI show commands for the same. +# Once that defect is fixed, we need to modify scripts to use the official command instead of bcm cmd. +import pytest +from spytest import st, tgapi, SpyTestDict + +import apis.routing.ip as ip_obj +import apis.system.basic as basic_obj +from apis.system.interface import show_queue_counters, clear_queue_counters +import apis.common.asic as asicapi +import apis.routing.arp as arp_obj +import apis.switching.vlan as vlan_obj +import apis.switching.mac as mac_obj +import apis.qos.cos as cos_obj +import apis.qos.qos as qos_obj + +from utilities.common import filter_and_select + +@pytest.fixture(scope="module", autouse=True) +def cos_module_hooks(request): + # add things at the start of this module + global vars + + st.log("Ensuring minimum topology") + vars = st.ensure_min_topology("D1T1:2") + + cos_variables() + cos_module_config(config='yes') + + st.log("Getting TG handlers") + data.tg1, data.tg_ph_1 = tgapi.get_handle_byname("T1D1P1") + data.tg2, data.tg_ph_2 = tgapi.get_handle_byname("T1D1P2") + data.tg = data.tg1 + + st.log("Reset and clear statistics of TG ports") + data.tg.tg_traffic_control(action='reset', port_handle=[data.tg_ph_1,data.tg_ph_2]) + data.tg.tg_traffic_control(action='clear_stats', port_handle=[data.tg_ph_1, data.tg_ph_2]) + + st.log("Creating TG streams") + data.streams = {} + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_2, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', transmit_mode='continuous', + vlan_id=data.vlan, 
mac_src=data.cos_dest_mac, mac_dst='00:0a:12:00:00:01', vlan="enable") + st.log('Stream output:{}'.format(stream)) + data.streams['vlan_tagged'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + pkts_per_burst=2020, l2_encap='ethernet_ii_vlan', transmit_mode='single_burst', + vlan_id=data.vlan, mac_src=data.cos_src1, mac_dst= data.cos_dest_mac, vlan="enable", + vlan_user_priority =data.vlan_priority1) + st.log('Stream output:{}'.format(stream)) + data.streams['cos_transmit'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', + l3_protocol="arp", arp_operation="arpReply", arp_dst_hw_addr=data.mac_addr, + arp_src_hw_addr=data.src_arp_addr) + st.log('Stream output:{}'.format(stream)) + data.streams['ARP_Reply'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', + mac_src=data.src_mac, mac_dst=data.mac_addr) + st.log('Stream output:{}'.format(stream)) + data.streams['switch_unicast_mac'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', l3_protocol='ipv4', + mac_src='00:0a:01:01:23:01', + mac_dst=data.dut_rt_int_mac, ip_src_addr=data.ipv4_source_address, + ip_dst_addr=data.ipv4_address, ip_ttl="0") + st.log('Stream output:{}'.format(stream)) + data.streams['ipv4_ttl_0'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', l3_protocol='ipv4', + 
mac_src='00:0a:01:01:23:01', + mac_dst=data.dut_rt_int_mac, ip_src_addr=data.ipv4_source_address, + ip_dst_addr=data.ipv4_address, ip_ttl="255") + st.log('Stream output:{}'.format(stream)) + data.streams['ipv4_ttl_255'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', l3_protocol='ipv6', + mac_src='00:0a:01:01:23:01', \ + mac_dst=data.dut_rt_int_mac, ipv6_src_addr=data.ipv6_source_address, + ipv6_dst_addr=data.ipv6_address, ipv6_hop_limit="0") + st.log('Stream output:{}'.format(stream)) + data.streams['ipv6_hop_limit_0'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', length_mode='fixed', frame_size=78, + rate_pps=2000, l2_encap='ethernet_ii_vlan', + transmit_mode='continuous', l3_protocol='ipv6', + mac_src='00:0a:01:01:23:01', \ + mac_dst=data.dut_rt_int_mac, ipv6_src_addr=data.ipv6_source_address, + ipv6_dst_addr=data.ipv6_address, ipv6_hop_limit="255") + st.log('Stream output:{}'.format(stream)) + data.streams['ipv6_hop_limit_255'] = stream['stream_id'] + yield + # add things at the end of this module" + cos_module_config(config='no') + + +@pytest.fixture(scope="function", autouse=True) +def cos_func_hooks(request): + # add things at the start every test case + yield + if st.get_func_name(request) == 'test_ft_cos_cpu_counters': + ip_obj.delete_ip_interface(vars.D1, vars.D1T1P1, data.ipv6_address, data.ipv6_subnet, family="ipv6") + ip_obj.delete_ip_interface(vars.D1, vars.D1T1P1, data.ipv4_address, data.subnet, family="ipv4") + +def cos_module_config(config='yes'): + if config == 'yes': + data.mac_addr = basic_obj.get_ifconfig_ether(vars.D1) + data.dut_rt_int_mac = basic_obj.get_ifconfig_ether(vars.D1, vars.D1T1P1) + else: + st.log("Clearing QoS, VLAN config and resetting and clearing TG stats") + vlan_obj.clear_vlan_configuration(vars.D1, thread=True) 
+ qos_obj.clear_qos_config(vars.D1) + data.tg.tg_traffic_control(action='reset', port_handle=[data.tg_ph_1, data.tg_ph_2]) + data.tg.tg_traffic_control(action='clear_stats', port_handle=[data.tg_ph_1, data.tg_ph_2]) + +def cos_variables(): + global data + data = SpyTestDict() + data.ageout_time = 600 + data.ipv4_address = "10.10.10.1" + data.ipv4_address_tgen = "10.10.10.2" + data.ipv6_address = "1001::1" + data.ipv6_address_tgen = "1001::2" + data.subnet = 24 + data.ipv6_subnet = 64 + data.ipv6_source_address = "2001::1" + data.rate_percent = 100 + data.ipv4_source_address = "20.20.20.1" + data.ip_protocol = 58 + data.l4_protocol = "icmp" + data.ipv6_family = "ipv6" + data.ipv4_family = "ipv4" + data.src_arp_addr = "00:01:02:03:04:05" + data.src_mac = "00:00:00:02:03:04" + data.multi_addr = "01:00:5e:00:01:01" + data.cos_name = "COS" + data.vlan = 555 + data.cos_dest_mac = "00:00:00:00:00:03" + data.cos_src1 = "00:00:00:00:00:01" + data.cos_src2 = "00:00:00:00:00:02" + data.vlan_priority1 = "4" + data.queue = "5" + data.pkts_per_burst = "100" + + +def configuring_ipv4_and_ipv6_address(): + st.log("About to add ipv6 address TGen connected interface") + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.ipv6_address, data.ipv6_subnet, family="ipv6") + if not ip_obj.verify_interface_ip_address(vars.D1, vars.D1T1P1, "{}/{}".format(data.ipv6_address, data.ipv6_subnet), + family="ipv6"): + st.report_fail("ip_routing_int_create_fail", vars.D1T1P1) + st.log("About to add ipv4 address TGen connected interface") + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.ipv4_address, data.subnet, family="ipv4") + if not ip_obj.verify_interface_ip_address(vars.D1, vars.D1T1P1, "{}/{}".format(data.ipv4_address, data.subnet), + family="ipv4"): + st.report_fail("ip_routing_int_create_fail", vars.D1T1P1) + + +def configuring_tc_to_queue_map(): + if not cos_obj.config_tc_to_queue_map(vars.D1, data.cos_name, {"4": "5"}): + 
st.report_fail("queue_map_not_found",data.vlan_priority1) + + +def binding_queue_map_to_interfaces(): + qos_map = {'port': vars.D1T1P1, 'map': 'tc_to_queue_map', 'obj_name': data.cos_name} + if not cos_obj.config_port_qos_map_all(vars.D1, qos_map): + st.report_fail("content_not_found") + + +def cos_counters_checking(value=None): + if not st.is_feature_supported("bcmcmd", vars.D1): + queue_dict_list = show_queue_counters(vars.D1, "CPU") + cpu_queue_counter = int(filter_and_select(queue_dict_list, ['pkts_count'], {'txq': 'MC1'})[0]['pkts_count'].replace(',', '')) + if cpu_queue_counter == 0: + st.error("{} queue_traffic_failed".format(value)) + return False + return True + else: + queue_dict_list = asicapi.get_counters(vars.D1) + for queue_dict in queue_dict_list: + if (queue_dict['key'] == 'MC_PERQ_PKT(1).cpu0'): + if int(queue_dict['value'].replace(",", "")) == 0: + st.error("{} queue_traffic_failed".format(value)) + return False + return True + + +def ping_ipv6_interface(): + h1 = data.tg.tg_interface_config(port_handle=data.tg_ph_1, mode='config', ipv6_intf_addr=data.ipv6_address_tgen, \ + ipv6_prefix_length="64", ipv6_gateway=data.ipv6_address, + src_mac_addr='00:0a:01:01:23:01', arp_send_req='1') + st.log("INTFCONF: " + str(h1)) + # ping from tgen to DUT's TGen connected IPV6 interface + res = tgapi.verify_ping(src_obj=data.tg, port_handle=data.tg_ph_1, dev_handle=h1['handle'], dst_ip=data.ipv6_address, \ + ping_count='2', exp_count='2') + st.log("PING_RES: " + str(res)) + if res: + st.log("Ping succeeded.") + else: + st.log("Ping failed.") + if not arp_obj.show_ndp(vars.D1, data.ipv6_address_tgen): + st.report_fail("ARP_entry_dynamic_entry_fail") + + +def ping_ipv4_interface(): + h1 = data.tg.tg_interface_config(port_handle=data.tg_ph_1, mode='config', intf_ip_addr=data.ipv4_address_tgen, \ + gateway=data.ipv4_address, src_mac_addr='00:00:23:11:14:08', arp_send_req='1') + st.log("INTFCONF: " + str(h1)) + # Ping from tgen to DUT's TGen connected IPV4 interface. 
+ res = tgapi.verify_ping(src_obj=data.tg, port_handle=data.tg_ph_1, dev_handle=h1['handle'], dst_ip=data.ipv4_address, \ + ping_count='1', exp_count='1') + st.log("PING_RES: " + str(res)) + if res: + st.log("Ping succeeded.") + else: + st.log("Ping failed.") + if not arp_obj.show_arp(vars.D1, data.ipv4_address_tgen): + st.report_fail("ARP_entry_dynamic_entry_fail") + + +def fdb_config(): + mac_obj.config_mac_agetime(vars.D1, data.ageout_time) + if not (mac_obj.get_mac_agetime(vars.D1) == data.ageout_time): + st.report_fail("mac_aging_time_failed_config") + + +def vlan_config(): + vlan_obj.create_vlan(vars.D1, data.vlan) + st.log("Adding TGen port connected interface to the vlan with tagging mode") + if not vlan_obj.add_vlan_member(vars.D1, data.vlan, [vars.D1T1P1, vars.D1T1P2], tagging_mode=True): + st.report_fail("vlan_tagged_member_fail", vars.D1T1P2, data.vlan) + if vlan_obj.verify_vlan_brief(vars.D1, data.vlan, tagged=True): + st.report_fail("vlan_create_fail") + + +@pytest.mark.cos_cpu_counters +@pytest.mark.community_unsupported +def test_ft_cos_cpu_counters(): + """ + Author : Sai Durga + FtOpSoQoSCosCqFn001 : Verify that switching, routing, ipv6 and ipv4 packets go into appropriate CPU CoS queue + ######################################################### + #TestBed : #2 + #2*TGEN ports--- D1---2 links-------D2--------2*TGEN ports + ########################################################## + Setup: + TGen-1 ---------DUT + Enable ip routing and ipv6 unicast-routing globally. + Procedure: + 1. Configure a port based routing interface with valid ipv6 address. + Configure the routing interface on TG port connected to that port in same subnet. + Send Ipv6 packets with hop-limit = 255 from TGen-1 destined to the IP address configured on DUT port. + 2. Configure a port based routing interface with valid ipv6 address. + Configure the routing interface on TG port connected to that port in same subnet. 
+ Send Ipv6 packets with hop-limit = 0 from TGen-1 destined to the IP address configured on DUT port. + 3. Configure a port based routing interface with valid ipv4 address. + Configure the routing interface on TG port connected to that port in same subnet. + Send Ipv4 packets from TGen-1 destined to the IP address configured on DUT port. In that TGen stream, configure TTL = 0. + 4. ARP Reply to router mac + 5. Unicast to switch mac address + Expected behavior: + 1. Observed that these packets are received on CoS queue# 1. + """ + status = True + configuring_ipv4_and_ipv6_address() + st.log("About to send switching packets") + st.log("Configuring TGen with ARP reply with switch MAC as destination ARP MAC") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['ARP_Reply']) + if not cos_counters_checking(value="ARP_Packets"): + status = False + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['ARP_Reply']) + st.log("Sending unicast frames with switch MAC") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['switch_unicast_mac']) + if not cos_counters_checking(value="switching_packets"): + status = False + st.log("Switching traffic sent to appropriate queue") + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['switch_unicast_mac']) + ping_ipv6_interface() + st.log("Sending ipv6 packets with hop limit 0") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['ipv6_hop_limit_0']) + if not cos_counters_checking(value="ipv6_packets_hop_limit_0"): + status = False + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['ipv6_hop_limit_0']) + st.log("Sending ipv6 packets with hop limit 255") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['ipv6_hop_limit_255']) + if not cos_counters_checking(value="ipv6_packets_hop_limit_255"): + status = False + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['ipv6_hop_limit_255']) + 
ping_ipv4_interface() + st.log("Sending ipv4 packets with TTL value 0") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['ipv4_ttl_0']) + if not cos_counters_checking(value="ipv4_packets_ttl_0"): + status = False + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['ipv4_ttl_0']) + st.log("Traffic sent to correct queue with v4 and v6 traffic") + if not status: + st.report_fail('queue_traffic_failed') + st.report_pass("test_case_passed") + + +@pytest.mark.cos_queue +@pytest.mark.community_unsupported +def test_ft_cos_tc_queue_map(): + """ + Author : Sai Durga + FtOpSoQoSCosFn001 : Verify that the traffic is mapped to the correct CoS queues based on the configured 'priority --> CoS map' table + Setup: + ========== + DUT----2TGen + 1) Connect 2 TGen ports (I1 transmitting ports I2 - receiving port) + 2) Set bridge aging-time to be 10 mins. + 3) Send in a layer2 packet from port2 (mac = 000000000002) to the DUT to create an entry in the fwd database. + 4) Change the TC to queue map to 4 to 5 + Procedure: + ========== + 1) Start transmitting the TGen streams with VLAN priority 4 from port1 to port2. 
+ Expected Result: + ================== + 1) Expected mapping is traffic from port1 sent to queue5 + """ + + st.log("Configuring MAC age time out") + fdb_config() + st.log("Creating vlan and adding the TGen connected ports to it") + vlan_config() + st.log("Sending traffic from port 2 to learn the MAC in FDB table") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['vlan_tagged']) + st.wait(10) + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['vlan_tagged']) + st.log("Verifying FDB table") + if not mac_obj.verify_mac_address_table(vars.D1, data.cos_dest_mac, type='Dynamic'): + st.report_fail("mac_address_verification_fail") + st.log("Configuring cos queue and binding it on interfaces") + configuring_tc_to_queue_map() + binding_queue_map_to_interfaces() + clear_queue_counters(vars.D1) + st.log("Sending traffic from 1st port") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['cos_transmit']) + st.wait(2) + data.tg.tg_traffic_control(action='stop', stream_handle=data.streams['cos_transmit']) + st.wait(2) + counter = show_queue_counters(vars.D1, vars.D1T1P2, queue='UC{}'.format(data.queue)) + if not(counter and counter[0]['pkts_count']): + st.report_fail("queue_traffic_failed") + if int(counter[0]['pkts_count'].replace(',', '')) < 2000: + st.report_fail("queue_traffic_failed") + st.report_pass("test_case_passed") diff --git a/spytest/tests/qos/test_wred.py b/spytest/tests/qos/test_wred.py new file mode 100644 index 00000000000..5530d2f0253 --- /dev/null +++ b/spytest/tests/qos/test_wred.py @@ -0,0 +1,179 @@ +import pytest + +from spytest import st, tgapi, SpyTestDict +from spytest.utils import poll_wait + +import tests.qos.wred_ecn_config_json as wred_config +import apis.qos.qos as qos_obj +import apis.system.switch_configuration as sconf_obj +import apis.switching.vlan as vlan_obj +import apis.common.asic as asicapi +import apis.qos.cos as cos_obj +from apis.qos.wred import apply_wred_ecn_config +import 
apis.switching.mac as mac_obj +import apis.system.interface as ifapi +import utilities.common as utils + +data = SpyTestDict() +data.ageout_time = 600 +data.rate_percent = 100 +data.vlan = 555 +data.dscp_dest_mac = "00:00:00:00:00:03" +data.dscp_src1 = "00:00:00:00:00:01" +data.dscp_src2 = "00:00:00:00:00:02" +data.vlan_priority1 = "4" +data.cos_name = "COS" +data.dscp_name = "DSCP" +data.queue = "5" + +@pytest.fixture(scope="module", autouse=True) +def wred_module_hooks(request): + # add things at the start of this module + global vars + vars = st.ensure_min_topology("D1T1:3") + wred_data = wred_config.init_vars(vars, apply_wred=True) + st.log('Creating WRED table') + utils.exec_all(True, [utils.ExecAllFunc(apply_wred_ecn_config, vars.D1, wred_data['wred_config_json'])]) + + st.log("Getting TG handlers") + data.tg1, data.tg_ph_1 = tgapi.get_handle_byname("T1D1P1") + data.tg2, data.tg_ph_2 = tgapi.get_handle_byname("T1D1P2") + data.tg3, data.tg_ph_3 = tgapi.get_handle_byname("T1D1P3") + data.tg = data.tg1 + + st.log("Reset and clear statistics of TG ports") + data.tg.tg_traffic_control(action='reset', port_handle=[data.tg_ph_1, data.tg_ph_2, data.tg_ph_3]) + data.tg.tg_traffic_control(action='clear_stats', port_handle=[data.tg_ph_1, data.tg_ph_2, data.tg_ph_3]) + st.log("Creating TG streams") + data.streams = {} + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_3, mode='create', length_mode='fixed', frame_size=64, + pkts_per_burst=10, l2_encap='ethernet_ii_vlan', transmit_mode='single_burst', + vlan_id=data.vlan, mac_src=data.dscp_dest_mac, mac_dst='00:0a:12:00:00:01', + vlan="enable") + data.streams['vlan_tagged_egress'] = stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', transmit_mode='continuous', + length_mode='fixed', rate_percent=10, l2_encap='ethernet_ii_vlan', + vlan_id=data.vlan, vlan="enable", + mac_src=data.dscp_src1, mac_dst=data.dscp_dest_mac, l3_protocol='ipv4', + ip_src_addr='1.1.1.1', 
ip_dst_addr='5.5.5.5', + ip_dscp="8", high_speed_result_analysis=0, + track_by='trackingenabled0 ipv4DefaultPhb0', + ip_dscp_tracking=1) + data.streams['dscp1']= stream['stream_id'] + stream = data.tg.tg_traffic_config(port_handle=data.tg_ph_1, mode='create', transmit_mode='continuous', + length_mode='fixed', rate_percent=10, l2_encap='ethernet_ii_vlan', + vlan_id=data.vlan, vlan="enable", + mac_src=data.dscp_src2, mac_dst=data.dscp_dest_mac, l3_protocol='ipv4', + ip_src_addr='1.1.1.1', ip_dst_addr='5.5.5.5', + ip_dscp="24", high_speed_result_analysis=0, + track_by='trackingenabled0 ipv4DefaultPhb0', + ip_dscp_tracking=1) + data.streams['dscp2'] = stream['stream_id'] + yield + #clearing WRED config + qos_obj.clear_qos_config(vars.D1) + vlan_obj.clear_vlan_configuration(vars.D1, thread=True) + + +@pytest.fixture(scope="function", autouse=True) +def wred_func_hooks(request): + # add things at the start every test case + # use 'st.get_func_name(request)' to compare + # if any thing specific a particular test case + yield + # add things at the end of this module" + + +def wred_running_config(): + if not sconf_obj.verify_running_config(vars.D1, "WRED_PROFILE", "WRED", "green_max_threshold","900000",): + st.report_fail("wred_config_not_updated_in_config_db") + +def configuring_tc_to_queue_map(): + cos_obj.config_tc_to_queue_map(vars.D1, data.cos_name, {"3": "3", "4": "4",}) + +def configuring_dscp_to_tc_map(): + cos_obj.config_dscp_to_tc_map(vars.D1, data.dscp_name, {"8": "3", "24": "4",}) + +def binding_queue_map_to_interfaces(): + qos_maps = [{'port': vars.D1T1P1, 'map': 'tc_to_queue_map', 'obj_name': data.cos_name}, {'port': vars.D1T1P1, 'map': 'dscp_to_tc_map', 'obj_name': data.dscp_name}, {'port': vars.D1T1P2, 'map': 'tc_to_queue_map', 'obj_name': data.cos_name}, {'port': vars.D1T1P2, 'map': 'dscp_to_tc_map', 'obj_name': data.dscp_name}, {'port': vars.D1T1P3, 'map': 'tc_to_queue_map', 'obj_name': data.cos_name}, {'port': vars.D1T1P3, 'map': 'dscp_to_tc_map', 
'obj_name': data.dscp_name}] + cos_obj.config_port_qos_map_all(vars.D1, qos_maps) + + +def fdb_config(): + mac_obj.config_mac_agetime(vars.D1, data.ageout_time) + if not (mac_obj.get_mac_agetime(vars.D1) == data.ageout_time): + st.report_fail("mac_aging_time_failed_config") + +def vlan_config(): + vlan_obj.create_vlan(vars.D1, data.vlan) + st.log("Adding TGen port connected interface to the vlan with tagging mode") + if not vlan_obj.add_vlan_member(vars.D1, data.vlan, [vars.D1T1P1, vars.D1T1P2, vars.D1T1P3], tagging_mode=True): + st.report_fail("vlan_tagged_member_fail") + if not vlan_obj.verify_vlan_brief(vars.D1, data.vlan): + st.report_fail("vlan_create_fail",data.vlan) + +def cos_counters_checking(value=None): + queue_dict_list = asicapi.get_counters(vars.D1) + for queue_dict in queue_dict_list: + if (queue_dict['key'] == 'WRED_PKT_GRE.'): + if int(queue_dict['value'].replace(",", "")) == 0: + st.error("{} queue_traffic_failed".format(value)) + return False + return True + +@pytest.mark.wred +@pytest.mark.qos_wred_ecn +@pytest.mark.community_unsupported +def test_ft_wred_functionality(): + """ + Author : Sai Durga + FtOpSoQsWdFn012 : Verify that WRED fnctionality working fine and WRED green drop counters incremented properly. + Setup: + =========== + DUT-----3---- TGen + + Procedure: + =========== + 1. Create a VLAN 10 and participate all the 3 ports in VLAN 10. + 2. Send tagged traffic from 3rd port and check the FDB table. + 3. Now configure WRED, dscp_to_tc_map and tc to queue map tables and bind it to 3 ports. + 4. Now send matched traffic from port1 to port3 and unmatched traffic from port2 to port3. + + Expected Results: + ===================== + 1. Verify that VLAN 10 created and all the 3 ports added to vlan 10 + 2. Verify that FDB table updated with 3rd port MAC addresses. + 3. Verify that WRED, dscp_to_tc_map and tc to queue map tables created and binded to 3 ports + 5. 
Verify that matched traffic forwared to configured queues based on the min and max threshold values and unmatched traffic is dropped and WRED green incremented properly. + + + """ + + + st.log("Configuring MAC age time out") + fdb_config() + st.log("Creating vlan and adding the TGen connected ports to it") + vlan_config() + st.log("Clearing the interface counters before sending the traffic") + ifapi.clear_interface_counters(vars.D1, interface_type="all") + st.log("Sending traffic from port 3 to learn the MAC in FDB table") + data.tg.tg_traffic_control(action='run', stream_handle=data.streams['vlan_tagged_egress']) + st.log("Verifying FDB table") + if not poll_wait(mac_obj.verify_mac_address_table, 30, vars.D1, data.dscp_dest_mac): + st.log("Displaying the interface counters to verify traffic is sent or not") + ifapi.show_interface_counters_all(vars.D1) + st.report_fail("mac_address_verification_fail") + wred_running_config() + configuring_tc_to_queue_map() + configuring_dscp_to_tc_map() + binding_queue_map_to_interfaces() + st.log("Clearing the interface counters before sending the traffic") + ifapi.clear_interface_counters(vars.D1, interface_type="all") + st.log("Sending traffic from 1st and 2nd port") + data.tg.tg_traffic_control(action='run', stream_handle=[data.streams['dscp1'], data.streams['dscp2']]) + st.wait(3) + data.tg.tg_traffic_control(action='stop', stream_handle=[data.streams['dscp1'], data.streams['dscp2'], data.streams['vlan_tagged_egress']]) + st.log("Displaying the interface counters after traffic test") + ifapi.show_interface_counters_all(vars.D1) + cos_counters_checking() + st.report_pass("test_case_passed") diff --git a/spytest/tests/routing/BGP/bgplib.py b/spytest/tests/routing/BGP/bgplib.py index 2d40416226c..1b6a4cf286c 100644 --- a/spytest/tests/routing/BGP/bgplib.py +++ b/spytest/tests/routing/BGP/bgplib.py @@ -839,6 +839,7 @@ def l3tc_vrfipv4v6_address_leafspine_tg_bgp_config(config='yes', vrf_type='all', 
bgpapi.config_bgp_multi_neigh_use_peergroup(data[device_type], local_asn=dut_as, peer_grp_name='leaf_tg6', remote_asn=tg_as+j-1, neigh_ip_list=tg_neigh6_list, family='ipv6', activate=1) + else: bgpapi.cleanup_bgp_config([data[dut] for dut in data['leaf_routers']+data['spine_routers']]) @@ -1288,7 +1289,7 @@ def configure_base_for_route_adv_and_filter(dut1, dut2, topo, config_items): :return: """ cli_type = st.get_ui_type(dut2, cli_type="") - cli_type = "vtysh" if cli_type in ['click', "vtysh"] else cli_type + cli_type = "vtysh" if cli_type in ['click', "vtysh"] else ("klish" if cli_type in ["rest-patch","rest-put"] else cli_type) use_global_rmap = rmapapi.RouteMap("UseGlobal") use_global_rmap.add_permit_sequence('10') @@ -1412,7 +1413,8 @@ def unconfigure_base_for_route_adv_and_filter(dut1, dut2, topo, config_items): """ cli_type = st.get_ui_type(dut1, cli_type="") - cli_type = "vtysh" if cli_type in ['click', "vtysh"] else cli_type + cli_type = "vtysh" if cli_type in ['click', "vtysh"] else ( + "klish" if cli_type in ["rest-patch", "rest-put"] else cli_type) bgpapi.config_bgp_network_advertise(dut1, topo['dut1_as'], '101.1.1.0/24', config='no') bgpapi.config_bgp_network_advertise(dut1, topo['dut1_as'], '102.1.1.0/24', config='no') bgpapi.config_bgp_network_advertise(dut1, topo['dut1_as'], '101:1::/64', addr_family='ipv6', config='no') diff --git a/spytest/tests/routing/BGP/bgpsplib.py b/spytest/tests/routing/BGP/bgpsplib.py index e98241427ea..d00283029b5 100644 --- a/spytest/tests/routing/BGP/bgpsplib.py +++ b/spytest/tests/routing/BGP/bgpsplib.py @@ -1234,8 +1234,8 @@ def bgp_sp_add_del_dut(dut, device_name, device_type='DUT', add='yes'): return True - st.log("BGP SP - Dut {} FAILED".format(action_str)) - return False + #st.log("BGP SP - Dut {} FAILED".format(action_str)) + #return False @staticmethod @@ -1287,8 +1287,8 @@ def bgp_sp_add_del_link(dut, link_type, link_name, intf_name, add='yes'): del sp_topo[dut]['intf'][link_name] return True - st.log("BGP SP - 
Link {} FAILED".format(action_str)) - return False + #st.log("BGP SP - Link {} FAILED".format(action_str)) + #return False @staticmethod @@ -1378,16 +1378,16 @@ def bgp_sp_add_del_link_ip(dut, link_name, ip_addr, subnet, rmt_ip, addr_family, st.log("BGP SP - {} {} does not exist".format(dut, link_name)) return True - if_data = sp_topo[dut]['intf'][link_name] - ip_data = sp_topo[dut][addr_family]['link'][link_name] + #if_data = sp_topo[dut]['intf'][link_name] + #ip_data = sp_topo[dut][addr_family]['link'][link_name] del sp_topo[dut][addr_family]['link'][link_name] #st.log("BGP SP - Deleted IP link {} {}".format(link_name, ip_data)) return True - st.log("BGP SP - Link ip {} FAILED".format(action_str)) - return False + #st.log("BGP SP - Link ip {} FAILED".format(action_str)) + #return False @staticmethod @@ -1648,8 +1648,8 @@ def bgp_sp_bgp_ip_route_is_selected(dut, route_prefix_list=[], addr_family='ipv4 match_selected ={'status_code': '*>'} selected_entries = BGPSP.bgp_sp_get_matching_entries(matched_entries, match_selected) - if not matched_entries: - return False + #if not matched_entries: + #return False if not match: return True @@ -2055,12 +2055,12 @@ def bgp_sp_setup_testbed_topology(per_node_nw='no', nw_ip_octet='10'): nwoct2 = from_dut_idx st_rt = "{}.{}.{}.{}".format(nwoct4, nwoct3, nwoct2, 0) - next_hop = BGPSP.bgp_sp_get_dut_loopback_ip(from_dut, 0, 'ipv4') + #next_hop = BGPSP.bgp_sp_get_dut_loopback_ip(from_dut, 0, 'ipv4') next_hop = BGPSP.bgp_sp_get_unused_dut_interface(from_dut) BGPSP.bgp_sp_add_del_dut_static_route_prefix(from_dut, st_rt, 24, next_hop, 'ipv4', add='yes') st_rt = "{}:{}:{}::{}".format(nwoct4, nwoct3, nwoct2, 0) - next_hop = BGPSP.bgp_sp_get_dut_loopback_ip(from_dut, 0, 'ipv6') + #next_hop = BGPSP.bgp_sp_get_dut_loopback_ip(from_dut, 0, 'ipv6') next_hop = BGPSP.bgp_sp_get_unused_dut_interface(from_dut) BGPSP.bgp_sp_add_del_dut_static_route_prefix(from_dut, st_rt, 64, next_hop, 'ipv6', add='yes') @@ -2222,7 +2222,7 @@ def 
bgp_sp_show_dut_bgp_running_config(dut_list=[]): st.vtysh_config(tb_dut, "do show running-config bgp") @staticmethod - def bgp_sp_loopback_interface_config_unconfig(config='yes', vrf='default'): + def bgp_sp_loopback_interface_config_unconfig(config='yes', vrf='default', threaded_run=True): """ :param config: @@ -2233,7 +2233,7 @@ def bgp_sp_loopback_interface_config_unconfig(config='yes', vrf='default'): st.banner("{}uring LOOPBACK Interface on all nodes.".format(action_str)) result = True - threaded_run = True + #threaded_run = True dut_list = BGPSP.bgp_sp_get_dut_list() #+ BGPSP.bgp_sp_get_tg_list() dut_thread = [] @@ -2259,10 +2259,9 @@ def bgp_sp_loopback_interface_config_unconfig(config='yes', vrf='default'): dut_thread.append(putils.ExecAllFunc(ipapi.config_loopback_interfaces, tb_dut, loopback_name=loopback_names, config=config)) else : result = ipapi.config_loopback_interfaces(tb_dut, loopback_name=loopback_names, config=config) - - if not result : - st.log("{}uring {} loopback interfaces FAILED".format(action_str, dut)) - return False + if not result : + st.log("{}uring {} loopback interfaces FAILED".format(action_str, dut)) + return False if threaded_run: [out, exceptions] = putils.exec_all(bgplib.fast_start, dut_thread) @@ -2273,7 +2272,7 @@ def bgp_sp_loopback_interface_config_unconfig(config='yes', vrf='default'): @staticmethod - def bgp_sp_loopback_address_config_unconfig(config='yes', vrf='default', addr_family='all'): + def bgp_sp_loopback_address_config_unconfig(config='yes', vrf='default', addr_family='all', threaded_run=True, debug_run=False): """ :param config: @@ -2289,8 +2288,8 @@ def bgp_sp_loopback_address_config_unconfig(config='yes', vrf='default', addr_fa st.log("SP topo:\n{}\n".format(sp_topo)) return False - threaded_run = True - debug_run = False + #threaded_run = True + #debug_run = False result = True config = 'add' if config == 'yes' else 'remove' @@ -2323,11 +2322,10 @@ def bgp_sp_loopback_address_config_unconfig(config='yes', 
vrf='default', addr_fa dut_thread.append([ipapi.config_unconfig_interface_ip_addresses, tb_dut, if_data_list, config]) else : result = ipapi.config_unconfig_interface_ip_addresses(tb_dut, if_data_list, config=config) - - if not result: - BGPSP.bgp_sp_show_dut_cmd_logs(dut) - st.log("{}uring {} loopback address FAILED".format(action_str, dut)) - return False + if not result: + BGPSP.bgp_sp_show_dut_cmd_logs(dut) + st.log("{}uring {} loopback address FAILED".format(action_str, dut)) + return False if debug_run: BGPSP.bgp_sp_show_dut_if_cmd_logs(dut) @@ -2341,7 +2339,7 @@ def bgp_sp_loopback_address_config_unconfig(config='yes', vrf='default', addr_fa @staticmethod - def bgp_sp_interface_address_all_config_unconfig(config='yes', vrf='default', addr_family='all'): + def bgp_sp_interface_address_all_config_unconfig(config='yes', vrf='default', addr_family='all', threaded_run=True, debug_run=False): """ :param config: @@ -2358,8 +2356,8 @@ def bgp_sp_interface_address_all_config_unconfig(config='yes', vrf='default', ad st.log("SP topo:\n{}\n".format(sp_topo)) return False - threaded_run = True - debug_run = False + #threaded_run = True + #debug_run = False result = True config = 'add' if config == 'yes' else 'remove' @@ -2392,11 +2390,10 @@ def bgp_sp_interface_address_all_config_unconfig(config='yes', vrf='default', ad dut_thread.append([ipapi.config_unconfig_interface_ip_addresses, tb_dut, if_data_list, config]) else : result = ipapi.config_unconfig_interface_ip_addresses(tb_dut, if_data_list, config=config) - - if not result: - BGPSP.bgp_sp_show_dut_cmd_logs(dut) - st.log("{}uring {} Interface address FAILED".format(action_str, dut)) - return False + if not result: + BGPSP.bgp_sp_show_dut_cmd_logs(dut) + st.log("{}uring {} Interface address FAILED".format(action_str, dut)) + return False if debug_run: BGPSP.bgp_sp_show_dut_if_cmd_logs(dut) @@ -2410,7 +2407,7 @@ def bgp_sp_interface_address_all_config_unconfig(config='yes', vrf='default', ad @staticmethod - def 
bgp_sp_tg_interface_ip_all_config_unconfig(config='yes', vrf='default', addr_family='all'): + def bgp_sp_tg_interface_ip_all_config_unconfig(config='yes', vrf='default', addr_family='all', threaded_run=True): """ :param config: @@ -2428,7 +2425,7 @@ def bgp_sp_tg_interface_ip_all_config_unconfig(config='yes', vrf='default', addr return False result = True - threaded_run = True + #threaded_run = True dut_thread = [] dut_list = BGPSP.bgp_sp_get_tg_list() @@ -2454,10 +2451,9 @@ def bgp_sp_tg_interface_ip_all_config_unconfig(config='yes', vrf='default', addr dut_thread.append([BGPSP.bgp_sp_tg_link_ip_config_unconfig, dut, link_name, addr_family, vrf, config]) else : result = BGPSP.bgp_sp_tg_link_ip_config_unconfig(dut, link_name, addr_family, vrf, config=config) - - if not result: - BGPSP.bgp_sp_show_dut_cmd_logs(dut) - st.log("{}uring TG {} Interface address FAILED".format(action_str, dut)) + if not result: + BGPSP.bgp_sp_show_dut_cmd_logs(dut) + st.log("{}uring TG {} Interface address FAILED".format(action_str, dut)) if threaded_run: [out, exceptions] = putils.exec_all(bgplib.fast_start, dut_thread) @@ -2592,7 +2588,7 @@ def bgp_sp_tg_link_ip_config_unconfig(dut, link_name, addr_family='all', vrf='de @staticmethod - def bgp_sp_static_route_config_unconfig(config='yes', vrf='default', addr_family='all'): + def bgp_sp_static_route_config_unconfig(config='yes', vrf='default', addr_family='all', threaded_run=True, debug_run=False): """ :param config: @@ -2608,8 +2604,8 @@ def bgp_sp_static_route_config_unconfig(config='yes', vrf='default', addr_family st.log("SP topo:\n{}\n".format(sp_topo)) return False - threaded_run = True - debug_run = False + #threaded_run = True + #debug_run = False result = True config = 'add' if config == 'yes' else 'remove' @@ -2650,11 +2646,10 @@ def bgp_sp_static_route_config_unconfig(config='yes', vrf='default', addr_family dut_thread.append([ipapi.config_unconfig_static_routes, tb_dut, rt_data_list, "vtysh", config]) else : result = 
ipapi.config_unconfig_static_routes(tb_dut, rt_data_list, shell="vtysh", config=config) - - if not result: - BGPSP.bgp_sp_show_dut_cmd_logs(dut) - st.log("{}uring {} Static route FAILED".format(action_str, dut)) - return False + if not result: + BGPSP.bgp_sp_show_dut_cmd_logs(dut) + st.log("{}uring {} Static route FAILED".format(action_str, dut)) + return False if debug_run: BGPSP.bgp_sp_show_dut_route_cmd_logs(dut) @@ -3381,8 +3376,9 @@ def bgp_sp_bgp_neighbor_route_reflector_config_unconfig(dut, nbr_list=[], addr_f def bgp_sp_route_map_config_unconfig(dut, rmap_name, condition='permit', sequence='', config='yes', **kwargs): cli_type = st.get_ui_type(dut, cli_type="") - cli_type = "vtysh" if cli_type in ['click', "vtysh"] else cli_type - + # cli_type = "vtysh" if cli_type in ['click', "vtysh"] else cli_type + # cli_type = "vtysh" if cli_type in ["rest-patch", "rest-put"] else cli_type + cli_type = "vtysh" if cli_type in ['click', "vtysh"] else ("klish" if cli_type in ["rest-patch", "rest-put"] else cli_type) action_str = 'Config' if config == 'yes' else 'Unconfig' st.log("{}uring route map".format(action_str)) @@ -3800,7 +3796,7 @@ def bgp_sp_bgp_neighbor_segment_config_unconfig(segment_data={}, addr_family='al @staticmethod - def bgp_sp_bgp_asn_map_config_unconfig(dut_asn_map={}, config='yes', vrf='default', addr_family='all', max_adjacency='all', cli_type="vtysh"): + def bgp_sp_bgp_asn_map_config_unconfig(dut_asn_map={}, config='yes', vrf='default', addr_family='all', max_adjacency='all', cli_type="vtysh", debug_run=False): """ :param dut_asn_map @@ -3824,7 +3820,7 @@ def bgp_sp_bgp_asn_map_config_unconfig(dut_asn_map={}, config='yes', vrf='defaul return False #threaded_run = False - debug_run = False + #debug_run = False result = True addr_family_list = BGPSP.bgp_sp_get_address_family_list(addr_family) @@ -3964,7 +3960,7 @@ def bgp_sp_clear_bgp(dut_list, addr_family='all'): @staticmethod - def bgp_sp_cleanup_bgp_routers(dut_list = []): + def 
bgp_sp_cleanup_bgp_routers(dut_list = [], threaded_run=True): if len(dut_list) == 0: dut_list = sp_topo['dut_list'] @@ -3972,7 +3968,7 @@ def bgp_sp_cleanup_bgp_routers(dut_list = []): st.log("BGP SP - Unconfiguring BGP routers {}".format(dut_list)) result = True - threaded_run = True + #threaded_run = True device_list = [] dut_thread = [] @@ -4026,18 +4022,33 @@ def bgp_sp_dut_verify_all_bgp_sessions(dut, addr_family='all', state='up'): for afmly in addr_family_list: nbr_list = bgp_topo[dut][vrf][afmly]['nbr'].keys() - result = bgpapi.verify_bgp_summary(tb_dut, family=afmly, neighbor=nbr_list, state='Established') - if result : - if state == 'down' : - st.log("BGP SP - BGP session not down for nghbor {}".format(nbr_list)) - BGPSP.bgp_sp_show_dut_route_cmd_logs(dut) - break + loop_flag = 0 + for iter in range(6): + result_flag = 0 + result = bgpapi.verify_bgp_summary(tb_dut, family=afmly, neighbor=nbr_list, state='Established') + if result : + if state == 'down' : + st.log("BGP SP - BGP session not down for nghbor {}".format(nbr_list)) + BGPSP.bgp_sp_show_dut_route_cmd_logs(dut) + #break + result_flag = 1 - if not result : - if state == 'up' : - st.log("BGP SP - BGP session not up for nghbor {}".format(nbr_list)) - BGPSP.bgp_sp_show_dut_route_cmd_logs(dut) + if not result : + if state == 'up' : + st.log("BGP SP - BGP session not up for nghbor {}".format(nbr_list)) + BGPSP.bgp_sp_show_dut_route_cmd_logs(dut) + #break + result_flag = 1 + + if result_flag == 0: + loop_flag = 0 break + else: + loop_flag = 1 + st.wait(10, "Waiting or the connectios establishement") + + if loop_flag == 1: + break if not result : break @@ -4335,7 +4346,7 @@ def bgp_sp_spine_leaf_bgp_config_unconfig(spine_asn=65001, leaf_asn=65003, addr_ spine_leaf_session_count += 1 if result and spine_leaf_session_count < 1 : - result = False + #result = False st.log("BGP SP - Zero spine leaf sessions") return False diff --git a/spytest/tests/routing/BGP/test_bgp.py 
b/spytest/tests/routing/BGP/test_bgp.py index d354fdd441b..5fec1fb0a4a 100644 --- a/spytest/tests/routing/BGP/test_bgp.py +++ b/spytest/tests/routing/BGP/test_bgp.py @@ -173,7 +173,7 @@ def ft_bgp_peer_traffic_check(self): stream_id1 = tr1['stream_id'] tg_ob.tg_traffic_control(action='run', handle=stream_id1) tg_ob.tg_traffic_control(action='stop', handle=stream_id1) - st.wait(5) + st.tg_wait(5) tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D1)]) tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D2)]) if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)): @@ -972,7 +972,7 @@ def test_ft_bgp_ebgp_confed(self): conf_identf=confed_identifier,cli_type=vtysh_cli_type) bgpapi.config_bgp(spine_name, config='yes', config_type_list='', local_as=spine_as, conf_peers=leaf_as,cli_type=vtysh_cli_type) - ipapi.config_access_list(spine_name, 'confed-access-list1', '125.5.1.0/16', 'permit', seq_num="9") + ipapi.config_access_list(spine_name, 'confed-access-list1', '125.5.1.0/24', 'permit', seq_num="9") ipapi.config_route_map_match_ip_address(spine_name, 'confed-rmap', 'permit', '10', 'confed-access-list1') bgpapi.config_bgp(spine_name, local_as=spine_as, neighbor=info['D2D1P1_ipv4'], config_type_list=["routeMap"], @@ -985,7 +985,7 @@ def test_ft_bgp_ebgp_confed(self): prefix='125.5.1.0', as_path='as_seq:1') tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='start') - n1 = st.poll_wait(ipapi.verify_ip_route, 120, topo.dut_list[1], ip_address='125.5.5.0/24') + n1 = st.poll_wait(ipapi.verify_ip_route, 120, topo.dut_list[1], ip_address='125.5.1.0/24') if not n1: st.error('Route-map matching prefexis from the Spine DUT are not advertised to leaf DUT.') utils.exec_all(True, [[st.generate_tech_support, topo.dut_list[0], 'ft_bgp_ebgp_confed'], [st.generate_tech_support, topo.dut_list[1], 'ft_bgp_ebgp_confed']]) @@ -1007,7 +1007,7 @@ def test_ft_bgp_ebgp_confed(self): # 
Unconfig section tg_ob.tg_emulation_bgp_control(handle=bgp_handle['handle'], mode='stop') ipapi.config_route_map_mode(topo.dut_list[0], 'confed-rmap', 'permit', '10', config='no') - ipapi.config_access_list(topo.dut_list[0], 'confed-access-list1', '121.5.1.0/16', 'permit', config='no', seq_num="9") + ipapi.config_access_list(topo.dut_list[0], 'confed-access-list1', '121.5.1.0/24', 'permit', config='no', seq_num="9") bgpapi.config_bgp(topo.dut_list[0], local_as=spine_as, config='no', neighbor=info['D2D1P1_ipv4'], config_type_list=["routeMap"], routeMap='confed-rmap', diRection='out',cli_type=vtysh_cli_type) bgpapi.create_bgp_next_hop_self(topo.dut_list[0], spine_as, 'ipv4', info['D2D1P1_ipv4'], 'no', 'no') @@ -1072,6 +1072,12 @@ class TestBGPIPvxRouteAdvertisementFilter: @pytest.fixture(scope="function", autouse=False) def bgp_ipvx_route_adv_func_hook(self, request): + if st.get_func_name(request) == "test_route_map_in_ipv6": + bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6', + config='no', + neighbor=self.local_topo['dut1_addr_ipv6'], + config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', + cli_type=vtysh_cli_type) yield if st.get_func_name(request) == "test_route_map_in_ipv4": bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv4', @@ -1079,12 +1085,23 @@ def bgp_ipvx_route_adv_func_hook(self, request): neighbor=self.local_topo['dut1_addr_ipv4'], config_type_list=["routeMap"], routeMap='SETPROPS', diRection='in', cli_type=vtysh_cli_type) + elif st.get_func_name(request) == "test_route_map_in_ipv6": + bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6', + config='no', + neighbor=self.local_topo['dut1_addr_ipv6'], + config_type_list=["routeMap"], routeMap='SETPROPS6', diRection='in', + cli_type=vtysh_cli_type) + bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], 
addr_family='ipv6', + config='yes', + neighbor=self.local_topo['dut1_addr_ipv6'], + config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', + cli_type=vtysh_cli_type) def configure_base_for_filter_prefix_on_community(self, peer_grp4_name, config, cli_type=""): bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'], config=config, config_type_list=["redist"], redistribute='static',cli_type=cli_type) bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], - neighbor=peer_grp4_name, addr_family='ipv4', config=config, + neighbor=self.local_topo['dut1_addr_ipv4'],conf_peers=peer_grp4_name, addr_family='ipv4', config=config, config_type_list=["routeMap"], routeMap='rmap1', diRection='in',cli_type=cli_type) @pytest.mark.community @@ -1175,7 +1192,7 @@ def test_redistribute_static_ipv4(self, bgp_ipvx_route_adv_filter_fixture): @pytest.mark.community_pass def test_distribute_list_in_ipv4(self, bgp_ipvx_route_adv_filter_fixture): - if st.get_ui_type() in ['klish']: + if st.get_ui_type() in ['klish','rest-patch','rest-put']: st.report_unsupported('test_execution_skipped', 'Skipping Distribute list test case for ui_type={}'.format(st.get_ui_type())) @@ -1365,7 +1382,12 @@ def test_default_originate_ipv4(self, bgp_ipvx_route_adv_filter_fixture): @pytest.mark.community @pytest.mark.community_pass def test_route_map_in_ipv4(self, bgp_ipvx_route_adv_filter_fixture, bgp_ipvx_route_adv_func_hook): - + metric_match = "400" + local_ref_match = "200" + bgpapi.config_bgp(dut=self.local_topo['dut1'], local_as=self.local_topo['dut1_as'], addr_family='ipv4', + config='no', + neighbor=self.local_topo['dut2_addr_ipv4'], + config_type_list=['import-check'], cli_type=bgp_cli_type) bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv4', match={'next_hop': self.local_topo['dut1_addr_ipv4']}, select=['network', 'local_pref', 'metric']) @@ -1382,27 +1404,31 @@ def test_route_map_in_ipv4(self, 
bgp_ipvx_route_adv_filter_fixture, bgp_ipvx_rou for x in output: if x['network'] == '102.1.1.0/24': if "metric" in x: - metric = x["metric"] - break + if x["metric"] == metric_match: + metric = x["metric"] + break + else: + metric = "0" else: metric = "0" else: metric = "0" - # metric = [x for x in output if x['network'] == '102.1.1.0/24'][0]['metric'] local_pref = "no" for x in output: if x['network'] == '101.1.1.0/24': if "local_pref" in x: - local_pref = x["local_pref"] - break + if x["local_pref"] == local_ref_match: + local_pref = x["local_pref"] + break + else: + local_pref = "0" else: local_pref = "0" else: local_pref = "0" - # local_pref = [x for x in output if x['network'] == '101.1.1.0/24'][0]['local_pref'] - if metric == '400' and local_pref == '200': + if metric == metric_match and local_pref == local_ref_match: st.report_pass("operation_successful") else: st.report_fail("operation_failed") @@ -1726,7 +1752,7 @@ def test_default_originate_ipv6(self, bgp_ipvx_route_adv_filter_fixture): @pytest.mark.community @pytest.mark.community_pass - def test_route_map_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture): + def test_route_map_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture, bgp_ipvx_route_adv_func_hook): bgpapi.fetch_ip_bgp_route(self.local_topo['dut2'], family='ipv6', match={'next_hop': self.local_topo['dut1_addr_ipv6']}, @@ -1742,10 +1768,6 @@ def test_route_map_in_ipv6(self, bgp_ipvx_route_adv_filter_fixture): select=['network', 'local_pref', 'metric']) metric = bgplib.get_route_attribute(output, 'metric', network='102:1::/64') local_pref = bgplib.get_route_attribute(output, 'local_pref', network='101:1::/64') - bgpapi.config_bgp(dut=self.local_topo['dut2'], local_as=self.local_topo['dut2_as'], addr_family='ipv6', - config='yes', - neighbor=self.local_topo['dut1_addr_ipv6'], - config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in',cli_type=vtysh_cli_type) if metric == '6400' and local_pref == '6200': st.report_pass("operation_successful") 
@@ -1934,7 +1956,7 @@ def test_bgp_ebgp6_traffic(self, bgp_ipvx_route_adv_filter_fixture): rate_pps=rate_pps) stream_id1 = tr1['stream_id'] tg_ob.tg_traffic_control(action='run', handle=stream_id1) - st.wait(20) + st.tg_wait(20) tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)]) tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)]) if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)): @@ -1993,7 +2015,7 @@ def test_bgp_ebgp6_traffic(self, bgp_ipvx_route_adv_filter_fixture): rate_pps=rate_pps) stream_id1 = tr1['stream_id'] tg_ob.tg_traffic_control(action='run', handle=stream_id1) - st.wait(20) + st.tg_wait(20) tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)]) tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)]) if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)): @@ -2119,7 +2141,7 @@ def test_static_blackhole_rt_redistribute_with_routemap_ipv6(self, bgp_ipvx_rout select=['network', 'as_path', 'metric']) metric = bgplib.get_route_attribute(output, 'metric', network = '2012:1::/64') - if metric == '50': + if (metric == '50' or metric == int('50')): st.log('static blackhole route with metric 50 redistributed from dut1 to dut2') result = True else: @@ -2271,7 +2293,7 @@ def bgp_l3_lag_pre_config_cleanup(): bgplib.l3tc_vrfipv4v6_address_leafspine_bgp_config(config='no') bgplib.l3tc_vrfipv4v6_address_leafspine_config_unconfig(config='no') bgpapi.cleanup_bgp_config(st.get_dut_names()) - ipapi.clear_ip_configuration(st.get_dut_names(), family='all', thread=True) + ipapi.clear_ip_configuration(st.get_dut_names(), family='all', thread=True, skip_error_check=True) bgplib.l3tc_underlay_config_unconfig(config='no', config_type='l3Lag') st.banner("BGP L3 OVER LAG CLASS CONFIG CLEANUP - END") diff --git a/spytest/tests/routing/BGP/test_bgp_rr_traffic.py 
b/spytest/tests/routing/BGP/test_bgp_rr_traffic.py index 05880f70ba8..aba942f8a08 100644 --- a/spytest/tests/routing/BGP/test_bgp_rr_traffic.py +++ b/spytest/tests/routing/BGP/test_bgp_rr_traffic.py @@ -148,7 +148,7 @@ def test_ft_bgp_rr_traffic_check(self): rate_pps=1000) stream_id1 = tr1['stream_id'] tg_ob.tg_traffic_control(action='run', handle=stream_id1) - st.wait(20) + st.tg_wait(20) tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D1)]) tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv4_tg_ph".format(TG_D2)]) if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)): @@ -214,7 +214,7 @@ def test_ft_bgp6_rr_traffic_check(self): rate_pps=1000) stream_id1 = tr1['stream_id'] tg_ob.tg_traffic_control(action='run', handle=stream_id1) - st.wait(20) + st.tg_wait(20) tg1_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D1)]) tg2_stats = tgapi.get_traffic_stats(tg_ob, port_handle=topo["T1{}P1_ipv6_tg_ph".format(TG_D2)]) if not (int(tg2_stats.tx.total_packets) and int(tg1_stats.rx.total_packets)): diff --git a/spytest/tests/routing/BGP/test_bgp_sp.py b/spytest/tests/routing/BGP/test_bgp_sp.py index d25a1258f72..b39170b656e 100644 --- a/spytest/tests/routing/BGP/test_bgp_sp.py +++ b/spytest/tests/routing/BGP/test_bgp_sp.py @@ -18,6 +18,7 @@ def bgp_sp_module_hooks(request): bgp_cli_type = st.get_ui_type() if bgp_cli_type == 'click': bgp_cli_type = 'vtysh' + bgp_cli_type = 'klish' if bgp_cli_type in ["rest-patch", "rest-put"] else bgp_cli_type BGPSP.bgp_sp_setup_testbed_topology() pre_config = True @@ -116,8 +117,8 @@ def test_bgp_sp_four_node_linear_ebgp_ibgp_session(self): if result: st.report_pass("test_case_passed") - else: - st.report_fail("test_case_failed") + #else: + #st.report_fail("test_case_failed") @pytest.mark.advance def test_bgp_linear_ebgp_ibgp_route_advanced(self): @@ -512,7 +513,7 @@ def 
test_bgp_sp_three_node_linear_ebgp_med_rmap(self): if result : st.log("BGP SP - Verify {} routes {} show 0 metric".format(mid_dut, dest_list)) - result = BGPSP.bgp_sp_bgp_verify_routes_in_dut_list([mid_dut], dest_list, afmly, present='yes') + BGPSP.bgp_sp_bgp_verify_routes_in_dut_list([mid_dut], dest_list, afmly, present='yes') selected_metric['metric'] = '0' result = BGPSP.bgp_sp_bgp_ip_routes_matching([mid_dut], dest_list, afmly, selected_metric) @@ -992,7 +993,7 @@ def test_bgp_sp_star_ibgp_route_reflector_bgp_sess_flap(self): st.log("BGP SP - Configuring client reflection on {} bgp asn {}".format(core_dut, core_asn)) result = BGPSP.bgp_sp_bgp_neighbor_route_reflector_config_unconfig(core_dut, nbr_list=[], addr_family='all' ) - #import pdb;pdb.set_trace() + if result : st.wait(60) st.log("BGP SP - verify every spoke has other spokes network due to root reflection") @@ -1082,7 +1083,7 @@ def test_bgp_sp_star_ibgp_route_reflector_bgp_sess_flap(self): if not temp_result : st.log("BGP SP - {} {} shutdown Failed".format(lcl_dut, lcl_link)) break - dut_int_shut = False + #dut_int_shut = False break result_str = "PASSED" if result else "FAILED" diff --git a/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_lib.py b/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_lib.py new file mode 100644 index 00000000000..3a17e72da88 --- /dev/null +++ b/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_lib.py @@ -0,0 +1,463 @@ + +import time +import datetime + +from spytest import st, utils +from spytest.tgen.tg import tgen_obj_dict + +import apis.routing.bgp as bgp_obj + +import apis.routing.ip as ipfeature +import apis.switching.vlan as vlan_obj + +import apis.routing.arp as arp_api + +import apis.routing.vrf as vrf_api +import apis.common.asic as asicapi +from utilities import parallel + +from L3SclEnhancement_vars import data + +vars = dict() + +def hdrMsg(msg): + st.log("\n######################################################################" \ 
+ " \n%s\n######################################################################"%msg) + +def get_handles(): + + global vars + vars = st.ensure_min_topology("D1T1:1","D2T1:1","D1T1:2","D2T1:2" ) + tg1 = tgen_obj_dict[vars['tgen_list'][0]] + tg2 = tgen_obj_dict[vars['tgen_list'][0]] + tg3 = tgen_obj_dict[vars['tgen_list'][0]] + tg4 = tgen_obj_dict[vars['tgen_list'][0]] + tg_ph_1 = tg1.get_port_handle(vars.T1D1P1) + tg_ph_2 = tg2.get_port_handle(vars.T1D2P1) + tg_ph_3 = tg3.get_port_handle(vars.T1D1P2) + tg_ph_4 = tg4.get_port_handle(vars.T1D2P2) + + return (tg1, tg2, tg3, tg4, tg_ph_1, tg_ph_2, tg_ph_3, tg_ph_4) + +def verify_arp_count(dut,expected_count=data.max_host_1,**kwargs): + + #n = show_arp_count(dut) + cli_type = kwargs.pop('cli_type', "") + n = arp_api.get_arp_count(dut,None,None,cli_type,**kwargs) + if int(n) >= int(expected_count): + st.log('PASS - Expected number of ARP entries found in the arp table') + return True + else: + st.log('FAIL - Expected number of ARP entries not found in the arp table.') + return False + +def verify_bgp_nbr_count(dut,expected_count=data.max_ecmp,vrf='default'): + + n = bgp_obj.get_bgp_nbr_count(dut,vrf=vrf) + if int(n) >= expected_count: + st.log('PASS - Expected number of BGP neighbors found') + return True + else: + st.log('FAIL - Expected number of BGP neighbors not found') + return False + +def verify_ipv6_bgp_nbr_count(dut,expected_count=data.max_ecmp,vrf='default'): + + n = bgp_obj.get_bgp_nbr_count(dut,family='ipv6',vrf=vrf) + if int(n) >= expected_count: + st.log('PASS - Expected number of BGP neighbors found') + return True + else: + st.log('FAIL - Expected number of BGP neighbors not found') + return False + +def verify_ndp_count(dut,expected_count=data.max_host_1,**kwargs): + + #n = show_nd_count(dut) + cli_type = kwargs.pop('cli_type', "") + n = arp_api.get_ndp_count(dut,cli_type,**kwargs) + if int(n) >= int(expected_count): + st.log('PASS - Expected number of ND entries found in the arp table') + return True + 
else: + st.log('FAIL - Expected number of ND entries not found in the arp table.') + return False + +def verify_ipv6_route_count_hardware(dut,exp_num_of_routes=data.num_of_routes2): + + n = asicapi.get_ipv6_route_count(dut) + if int(n) > exp_num_of_routes: + st.log('PASS - Expected number of IPv6 routes present in the hardware') + return True + else: + st.log('FAIL - Expected number of IPv6 routes not present in the hardware') + return False + +def verify_route_count_hardware(dut,exp_num_of_routes=data.num_of_routes): + + #n = show_route_count_hardware(dut) + n = asicapi.get_ipv4_route_count(dut) + if int(n) > exp_num_of_routes: + st.log('PASS - Expected number of IPv4 routes present in the hardware') + return True + else: + st.log('FAIL - Expected number of IPv4 routes not present in the hardware') + return False + +def retry_api(func,args,**kwargs): + retry_count = kwargs.get("retry_count", 5) + delay = kwargs.get("delay", 5) + if 'retry_count' in kwargs: del kwargs['retry_count'] + if 'delay' in kwargs: del kwargs['delay'] + for i in range(retry_count): + st.log("Attempt %s of %s" %((i+1),retry_count)) + if func(args,**kwargs): + return True + if retry_count != (i+1): + st.log("waiting for %s seconds before retrying again"%delay) + st.wait(delay) + return False + + + +def tg_vrf_bind(**kwargs): + vars = st.get_testbed_vars() + + dut1 = st.get_dut_names()[0] + dut2 = st.get_dut_names()[1] + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure vlans on the PE--CE side-------######') + for i in range(3): + utils.exec_all(True, [[vlan_obj.create_vlan, vars.D1, data.dut1_tg1_vlan[i]], [vlan_obj.create_vlan, vars.D2, data.dut2_tg1_vlan[i]]]) + utils.exec_all(True,[[vlan_obj.add_vlan_member,vars.D1,data.dut1_tg1_vlan[i],vars.D1T1P1,True,True], [vlan_obj.add_vlan_member,vars.D2,data.dut2_tg1_vlan[i],vars.D2T1P1,True,True]]) + ''' + vlan_obj.create_vlan(vars.D1, data.dut1_tg1_vlan[i]) # Vlan-1, 
VRF-101, port1 + vlan_obj.add_vlan_member(vars.D1, data.dut1_tg1_vlan[i], vars.D1T1P1, True, True) + vlan_obj.create_vlan(vars.D2, data.dut2_tg1_vlan[i]) + vlan_obj.add_vlan_member(vars.D2, data.dut2_tg1_vlan[i], vars.D2T1P1, True, True) + ''' + st.log('######------Bind DUT1 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan, ip, ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan[0:3],data.dut1_tg1_vrf_ip[0:3], data.dut1_tg1_vrf_ipv6[0:3]): + vrf_api.bind_vrf_interface(dut = vars.D1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(vars.D1, 'Vlan'+vlan, ip, data.dut1_tg1_vrf_ip_subnet, 'ipv4') + ipfeature.config_ip_addr_interface(vars.D1, 'Vlan'+vlan, ipv6, data.dut1_tg1_vrf_ipv6_subnet, 'ipv6') + + st.log('######------Bind DUT2 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan,ip,ipv6 in zip(data.vrf_name[0:3], data.dut2_tg1_vlan[0:3], data.dut2_tg1_vrf_ip[0:3], data.dut2_tg1_vrf_ipv6[0:3]): + vrf_api.bind_vrf_interface(dut = vars.D2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(vars.D2, 'Vlan'+vlan, ip, data.dut2_tg1_vrf_ip_subnet, 'ipv4') + ipfeature.config_ip_addr_interface(vars.D2, 'Vlan'+vlan, ipv6, data.dut2_tg1_vrf_ipv6_subnet, 'ipv6') + else: + st.log('######------Unbind DUT1 <--> tg1port1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan, ip, ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan[0:3],data.dut1_tg1_vrf_ip[0:3], data.dut1_tg1_vrf_ipv6[0:3]): + ipfeature.delete_ip_interface(dut1, 'Vlan'+vlan, ip, data.dut1_tg1_vrf_ip_subnet, 'ipv4') + ipfeature.delete_ip_interface(dut1, 'Vlan'+vlan, ipv6, data.dut1_tg1_vrf_ipv6_subnet, 'ipv6') + vrf_api.bind_vrf_interface(dut = dut1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + st.log('######------Unbind DUT2 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan,ip,ipv6 in 
zip(data.vrf_name[0:3], data.dut2_tg1_vlan[0:3], data.dut2_tg1_vrf_ip[0:3], data.dut2_tg1_vrf_ipv6[0:3]): + ipfeature.delete_ip_interface(dut2, 'Vlan'+vlan, ip, data.dut2_tg1_vrf_ip_subnet, 'ipv4') + ipfeature.delete_ip_interface(dut2, 'Vlan'+vlan, ipv6, data.dut2_tg1_vrf_ipv6_subnet, 'ipv6') + vrf_api.bind_vrf_interface(dut = dut2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + + st.log('######------Unconfigure vlans on the PE--CE side -------######') + for i in range(3): + vlan_obj.delete_vlan_member(dut1, data.dut1_tg1_vlan[i], vars.D1T1P1, tagging_mode=True) + vlan_obj.delete_vlan(dut1, data.dut1_tg1_vlan[i]) # Vlan-1, VRF-101, port1 + vlan_obj.delete_vlan_member(dut2, data.dut2_tg1_vlan[i], vars.D2T1P1, tagging_mode=True) + vlan_obj.delete_vlan(dut2, data.dut2_tg1_vlan[i]) + +def tg_vrf_bind2(**kwargs): + vars = st.get_testbed_vars() + + dut1 = st.get_dut_names()[0] + dut2 = st.get_dut_names()[1] + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure vlans on the PE--CE side-------######') + for i in range(3): + utils.exec_all(True, [[vlan_obj.create_vlan, vars.D1, data.dut1_tg1_vlan2[i]], [vlan_obj.create_vlan, vars.D2, data.dut1_tg1_vlan2[i]]]) + utils.exec_all(True,[[vlan_obj.add_vlan_member,vars.D1,data.dut1_tg1_vlan2[i],vars.D1T1P2,True,True], [vlan_obj.add_vlan_member,vars.D2,data.dut1_tg1_vlan2[i],vars.D2T1P2,True,True]]) + ''' + vlan_obj.create_vlan(vars.D1, data.dut1_tg1_vlan2[i]) # Vlan-1, VRF-101, port1 + vlan_obj.add_vlan_member(vars.D1, data.dut1_tg1_vlan2[i], vars.D1T1P2, True, True) + vlan_obj.create_vlan(vars.D2, data.dut1_tg1_vlan2[i]) + vlan_obj.add_vlan_member(vars.D2, data.dut1_tg1_vlan2[i], vars.D2T1P2, True, True) + ''' + + st.log('######------Bind DUT1 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan, ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan2[0:3], data.dut1_tg1_vrf_ipv6_2[0:3]): + 
vrf_api.bind_vrf_interface(dut = vars.D1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(vars.D1, 'Vlan'+vlan, ipv6, data.dut1_tg1_vrf_ipv6_subnet, 'ipv6') + + st.log('######------Bind DUT2 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan,ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan2[0:3], data.dut2_tg1_vrf_ipv6_2[0:3]): + vrf_api.bind_vrf_interface(dut = vars.D2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(vars.D2, 'Vlan'+vlan, ipv6, data.dut2_tg1_vrf_ipv6_subnet, 'ipv6') + else: + st.log('######------Unbind DUT1 <--> tg1port1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan, ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan2[0:3], data.dut1_tg1_vrf_ipv6_2[0:3]): + vrf_api.bind_vrf_interface(dut = dut1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + ipfeature.delete_ip_interface(dut1, 'Vlan'+vlan, ipv6, data.dut1_tg1_vrf_ipv6_subnet, 'ipv6') + + st.log('######------Unbind DUT2 <--> tg1 vlans to vrf, assign v4 and v6 address------######') + for vrf, vlan,ipv6 in zip(data.vrf_name[0:3], data.dut1_tg1_vlan2[0:3], data.dut2_tg1_vrf_ipv6_2[0:3]): + vrf_api.bind_vrf_interface(dut = dut2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + ipfeature.delete_ip_interface(dut2, 'Vlan'+vlan, ipv6, data.dut2_tg1_vrf_ipv6_subnet, 'ipv6') + + st.log('######------Unconfigure vlans on the PE--CE side -------######') + for i in range(3): + vlan_obj.delete_vlan_member(dut1, data.dut1_tg1_vlan2[i], vars.D1T1P2, tagging_mode=True) + vlan_obj.delete_vlan(dut1, data.dut1_tg1_vlan2[i]) # Vlan-1, VRF-101, port1 + vlan_obj.delete_vlan_member(dut2, data.dut1_tg1_vlan2[i], vars.D2T1P2, tagging_mode=True) + vlan_obj.delete_vlan(dut2, data.dut1_tg1_vlan2[i]) + + +def dut_vrf_bind(**kwargs): + vars = st.get_testbed_vars() + + dut1 = st.get_dut_names()[0] + dut2 = 
st.get_dut_names()[1] + + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure vlans on the PE--PE side - DUT1 -- DUT2------######') + for vlan in data.dut1_dut2_vlan[0:3]: + vlan_obj.create_vlan(dut1, vlan) + vlan_obj.add_vlan_member(dut1, vlan, vars.D1D2P1, True, True) + for vlan in data.dut2_dut1_vlan[0:3]: + vlan_obj.create_vlan(dut2, vlan) + vlan_obj.add_vlan_member(dut2, vlan, vars.D2D1P1, True, True) + + st.log('######------Bind DUT1 <--> DUT2 vlans to vrf, assign v4 and v6 address------######') + for vlan, ip, ip2, ipv6, ipv6_2, vrf in zip(data.dut1_dut2_vlan[0:3], data.dut1_dut2_vrf_ip[0:3], data.dut2_dut1_vrf_ip[0:3], data.dut1_dut2_vrf_ipv6[0:3], data.dut2_dut1_vrf_ipv6[0:3], data.vrf_name[0:3]): + dict1 = {'vrf_name':vrf, 'intf_name':'Vlan'+vlan,'skip_error':True} + dict2 = {'vrf_name':vrf, 'intf_name':'Vlan'+vlan,'skip_error':True} + parallel.exec_parallel(True, [dut1, dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ipfeature.config_ip_addr_interface,dut1,'Vlan'+vlan,ip,data.dut1_dut2_vrf_ip_subnet,'ipv4'], [ipfeature.config_ip_addr_interface,dut2, 'Vlan'+vlan, ip2, data.dut2_dut1_vrf_ip_subnet, 'ipv4']]) + + utils.exec_all(True,[[ipfeature.config_ip_addr_interface,dut1,'Vlan'+vlan,ipv6,data.dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ipfeature.config_ip_addr_interface,dut2, 'Vlan'+vlan,ipv6_2, data.dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + ''' + vrf_api.bind_vrf_interface(dut = dut1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(dut1, 'Vlan'+vlan, ip, data.dut1_dut2_vrf_ip_subnet, 'ipv4') + ipfeature.config_ip_addr_interface(dut1, 'Vlan'+vlan, ipv6, data.dut1_dut2_vrf_ipv6_subnet, 'ipv6') + ''' + ''' + st.log('######------Bind DUT2 <--> DUT1 virtual interfaces to vrf and config IP addresses------######') + for vlan, ip, ipv6, vrf in zip(data.dut2_dut1_vlan[0:3], data.dut2_dut1_vrf_ip[0:3], 
data.dut2_dut1_vrf_ipv6[0:3],data.vrf_name[0:3]): + vrf_api.bind_vrf_interface(dut = dut2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True) + ipfeature.config_ip_addr_interface(dut2, 'Vlan'+vlan, ip, data.dut2_dut1_vrf_ip_subnet, 'ipv4') + ipfeature.config_ip_addr_interface(dut2, 'Vlan'+vlan, ipv6, data.dut2_dut1_vrf_ipv6_subnet, 'ipv6') + ''' + + elif config == 'no': + ''' + st.log('######------Unbind DUT1 <--> DUT2 vlans to vrf, assign v4 and v6 address------######') + for vlan, ip, ipv6, vrf in zip(data.dut1_dut2_vlan[0:3], data.dut1_dut2_vrf_ip[0:3], data.dut1_dut2_vrf_ipv6[0:3],data.vrf_name[0:3]): + vrf_api.bind_vrf_interface(dut = dut1, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + ipfeature.delete_ip_interface(dut1, 'Vlan'+vlan, ip, data.dut1_dut2_vrf_ip_subnet, 'ipv4') + ipfeature.delete_ip_interface(dut1, 'Vlan'+vlan, ipv6, data.dut1_dut2_vrf_ipv6_subnet, 'ipv6') + + st.log('######------Unbind DUT2 <--> DUT1 physical interfaces to vrf and config IP addresses------######') + for vlan, ip, ipv6, vrf in zip(data.dut2_dut1_vlan[0:3], data.dut2_dut1_vrf_ip[0:3], data.dut2_dut1_vrf_ipv6[0:3],data.vrf_name[0:3]): + vrf_api.bind_vrf_interface(dut = dut2, vrf_name = vrf, intf_name = 'Vlan'+vlan, skip_error = True, config = 'no') + ipfeature.delete_ip_interface(dut2, 'Vlan'+vlan, ip, data.dut2_dut1_vrf_ip_subnet, 'ipv4') + ipfeature.delete_ip_interface(dut2, 'Vlan'+vlan, ipv6, data.dut2_dut1_vrf_ipv6_subnet, 'ipv6') + ''' + st.log('######------Delete vlans on the PE--PE side - DUT1 -- DUT2------######') + for vlan in data.dut1_dut2_vlan[0:3]: + vlan_obj.delete_vlan_member(dut1, vlan, vars.D1D2P1, tagging_mode=True) + vlan_obj.delete_vlan(dut1, vlan) + for vlan in data.dut2_dut1_vlan[0:3]: + vlan_obj.delete_vlan_member(dut2, vlan, vars.D2D1P1, tagging_mode=True) + vlan_obj.delete_vlan(dut2, vlan) + +def dut_vrf_bgp(**kwargs): + vars = st.get_testbed_vars() + + dut1 = st.get_dut_names()[0] + dut2 = st.get_dut_names()[1] + + 
if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure BGP in vrf for virtual interface ------######') + for i in range(0,3): + dict1 = {'vrf_name':data.vrf_name[i],'router_id':data.dut1_router_id,'local_as':data.dut1_as[i],'neighbor':data.dut2_dut1_vrf_ip[i],'remote_as':data.dut2_as[i],'config_type_list':['neighbor']} + dict2 = {'vrf_name':data.vrf_name[i],'router_id':data.dut2_router_id,'local_as':data.dut2_as[i],'neighbor':data.dut1_dut2_vrf_ip[i],'remote_as':data.dut1_as[i],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp, [dict1, dict2]) + + dict1 = {'vrf_name':data.vrf_name[i],'router_id':data.dut1_router_id,'local_as':data.dut1_as[i],'neighbor':data.dut2_dut1_vrf_ip[i],'remote_as':data.dut2_as[i],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':data.vrf_name[i],'router_id':data.dut2_router_id,'local_as':data.dut2_as[i],'neighbor':data.dut1_dut2_vrf_ip[i],'remote_as':data.dut1_as[i],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp, [dict1, dict2]) + + st.log('######------Configure BGPv4+ in vrf for virtual interface ------######') + dict1 = {'vrf_name':data.vrf_name[i],'router_id':data.dut1_router_id,'local_as':data.dut1_as[i],'addr_family':'ipv6','neighbor':data.dut2_dut1_vrf_ipv6[i],'remote_as':data.dut2_as[i],'config_type_list':['neighbor']} + dict2 = {'vrf_name':data.vrf_name[i],'router_id':data.dut2_router_id,'local_as':data.dut2_as[i],'addr_family':'ipv6','neighbor':data.dut1_dut2_vrf_ipv6[i],'remote_as':data.dut1_as[i],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp, [dict1, dict2]) + + dict1 = 
{'vrf_name':data.vrf_name[i],'router_id':data.dut1_router_id,'local_as':data.dut1_as[i],'addr_family':'ipv6','neighbor':data.dut2_dut1_vrf_ipv6[i],'remote_as':data.dut2_as[i],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':data.vrf_name[i],'router_id':data.dut2_router_id,'local_as':data.dut2_as[i],'addr_family':'ipv6','neighbor':data.dut1_dut2_vrf_ipv6[i],'remote_as':data.dut1_as[i],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [vars.D1, vars.D2], bgp_obj.config_bgp, [dict1, dict2]) + + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], local_as = data.dut2_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.dut1_dut2_vrf_ipv6[i]) + bgp_obj.config_bgp(dut = dut1, vrf_name = data.vrf_name[i], local_as = data.dut1_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.dut2_dut1_vrf_ipv6[i]) + elif config == 'no': + for i in range(0,3): + bgp_obj.config_bgp(dut = dut1, local_as = data.dut1_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + bgp_obj.config_bgp(dut = dut2, local_as = data.dut2_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + +def tg_vrf_bgp(**kwargs): + dut1 = st.get_dut_names()[0] + dut2 = st.get_dut_names()[1] + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure BGP in vrf -101 for TG interface------######') + for i in range(0,3): + bgp_obj.config_bgp(dut = dut1, vrf_name = data.vrf_name[i], router_id = data.dut1_router_id, local_as = data.dut1_as[i], neighbor = data.tg1_dut1_vrf_ip[i], remote_as = data.dut1_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + st.log('######------Configure BGPv4+ in vrf-102 for TG interface ------######') + bgp_obj.config_bgp(dut = dut1, 
vrf_name = data.vrf_name[i], router_id = data.dut1_router_id, addr_family ='ipv6', local_as = data.dut1_as[i], neighbor = data.tg1_dut1_vrf_ipv6[i], remote_as = data.dut1_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + bgp_obj.config_bgp(dut = dut1, vrf_name = data.vrf_name[i], local_as = data.dut1_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.tg1_dut1_vrf_ipv6[i]) + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], router_id = data.dut2_router_id, local_as = data.dut2_as[i], neighbor = data.tg1_dut2_vrf_ip[i], remote_as = data.dut2_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], router_id = data.dut2_router_id, addr_family ='ipv6', local_as = data.dut2_as[i], neighbor = data.tg1_dut2_vrf_ipv6[i], remote_as = data.dut2_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], local_as = data.dut2_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.tg1_dut2_vrf_ipv6[i]) + time.sleep(2) + ''' + for i in range(0,3): + if not retry_api(bgp_obj.verify_bgp_summary,dut1,family='ipv4',shell="sonic",neighbor=data.tg1_dut1_vrf_ip[i], state='Established', vrf = data.vrf_name[i],delay=5,retry_count=5): + st.log("FAIL: BGP not up") + if not retry_api(bgp_obj.verify_bgp_summary,dut2,family='ipv6',shell="sonic",neighbor=data.tg1_dut2_vrf_ipv6[i], state='Established', vrf = data.vrf_name[i],delay=5,retry_count=5): + st.log("FAIL: BGPv6 not up") + ''' + elif config == 'no': + for i in range(0,3): + bgp_obj.config_bgp(dut = dut1, local_as = data.dut1_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + bgp_obj.config_bgp(dut = dut2, local_as = data.dut2_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', 
config_type_list = ["removeBGP"]) + +def tg_vrf_bgp2(**kwargs): + dut1 = st.get_dut_names()[0] + dut2 = st.get_dut_names()[1] + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if config == '': + st.log('######------Configure BGP in vrf -101 for TG interface------######') + for i in range(0,3): + st.log('######------Configure BGPv4+ in vrf-102 for TG interface ------######') + bgp_obj.config_bgp(dut = dut1, vrf_name = data.vrf_name[i], router_id = data.dut1_router_id, addr_family ='ipv6', local_as = data.dut1_as[i], neighbor = data.tg1_dut1_vrf_ipv6_2[i], remote_as = data.dut1_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + bgp_obj.config_bgp(dut = dut1, vrf_name = data.vrf_name[i], local_as = data.dut1_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.tg1_dut1_vrf_ipv6_2[i]) + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], router_id = data.dut2_router_id, addr_family ='ipv6', local_as = data.dut2_as[i], neighbor = data.tg1_dut2_vrf_ipv6_2[i], remote_as = data.dut2_tg_as[i], config = 'yes', config_type_list =['neighbor','activate']) + bgp_obj.config_bgp(dut = dut2, vrf_name = data.vrf_name[i], local_as = data.dut2_as[i], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=data.tg1_dut2_vrf_ipv6_2[i]) + time.sleep(2) + elif config == 'no': + for i in range(0,3): + bgp_obj.config_bgp(dut = dut1, local_as = data.dut1_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + bgp_obj.config_bgp(dut = dut2, local_as = data.dut2_as[i], vrf_name = data.vrf_name[i] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + + +def ip_incr(ip,octet): + ip_list = ip.split(".") + ip_list[octet] = str(int(ip_list[octet]) + 1) + return '.'.join(ip_list) + +#print (ip_incr("8.0.0.1",2)) + +def ip_range(ip,octet,scl): + i=0 + j=0 + ip2=ip + ip_list=[ip] 
+ while (i 0 and record_start_time == 0: + start_time = now + st.log("Time when the first nd was installed %s " %(str(start_time))) + sleep_time=10 + record_start_time =1 + #st.log start_time + curr_nd = curr_nd + nd_in_this_poll + after = datetime.datetime.now() + st.log(" [%s]: increment %d curr_nd %d " %(str(after), nd_in_this_poll, curr_nd)) + if curr_nd == max_nd: + break + st.wait(sleep_time) + + end_time = datetime.datetime.now() + st.log("Time when all the NDP's were installed %s" %(str(end_time))) + #st.log end_time + diff = (end_time - start_time).total_seconds() + st.log("total time is %d" %(int(diff))) + return int(diff) + + diff --git a/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_vars.py b/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_vars.py new file mode 100644 index 00000000000..16313c407d1 --- /dev/null +++ b/spytest/tests/routing/L3PerfSclEnhancement/L3SclEnhancement_vars.py @@ -0,0 +1,278 @@ +from spytest.dicts import SpyTestDict + +data = SpyTestDict() +data.vlan = '200' +data.vlan1 = 'Vlan200' +data.vlan201 = '201' +data.vlan201_1 = 'Vlan201' +data.vlan202 = '202' +data.vlan202_1 = 'Vlan202' +data.d1d2_1_ip_addr = '12.1.1.1' +data.d2d1_1_ip_addr = '12.1.1.2' +data.d1d2_2_ip_addr = '12.2.1.1' +data.d2d1_2_ip_addr = '12.2.1.2' +data.rtrid1 = '1.1.1.1' +data.rtrid2 = '2.2.2.2' +data.d1t1_ip_addr = '200.1.1.1' +data.d2t1_ip_addr = '200.2.1.1' +data.d1t1_ipv6_addr = '2001:1::1' +data.d2t1_ipv6_addr = '2002:1::1' +data.d1t1_ipv6_addr2 = '2111:1::1' +data.d2t1_ipv6_addr2 = '2222:1::1' +data.t1d1_ipv6_addr = '2001:1::100' +data.t1d2_ipv6_addr = '2002:1::100' +data.t1d1_ipv6_addr2 = '2111:1::100' +data.t1d2_ipv6_addr2 = '2222:1::100' +data.t1d1_ip_addr = '200.1.1.10' +data.t1d2_ip_addr = '200.2.1.10' +data.d1d2_ipv6_addr = '1200::1' +data.d2d1_ipv6_addr = '1200::2' +data.mask = '16' +data.maskv6 = '64' +data.oid_sysName = '1.3.6.1.2.1.1.5.0' +data.ro_community = 'test_community' +data.location = 'hyderabad' 
+data.oid_dot1qFdbTable = '1.3.6.1.2.1.17.7.1.2.1' +data.mgmt_int = 'eth0' + +data.my_dut_list = None +data.local = None +data.remote = None +data.mask = "16" +data.counters_threshold = 10 +data.tgen_stats_threshold = 10 +data.tgen_rate_pps = '1000' +data.tgen_l3_len = '500' +data.traffic_run_time = 30 +data.max_host_1 = 4000 +data.max_host_2 = 10000 +data.keep_alive = '2' +data.hold_down = '6' + +data.ip_list_1 = ['200.1.1.1', '200.1.1.10'] +data.ip_list_2 = ['201.%s.0.10'%x for x in range(10,20)] +data.ip_list_3 = ['201.%s.0.1'%x for x in range(10,20)] + +data.ipv6_list_2 = ['2001:%s::1'%x for x in range(10,20)] +data.ipv6_list_3 = ['2001:%s::10'%x for x in range(10,20)] + +data.num_routes = '63990' +data.prefix1 = '121.1.1.0' +data.prefix2 = '221.1.1.0' +data.prefix3 = '131.1.1.0' +data.prefix4 = '231.1.1.0' +data.prefix_ipv6 = '1121:1::' +data.prefix2_ipv6 = '3121:1::' +data.prefix_list_vrf = ['11.1.1.0','12.1.1.0','13.1.1.0'] +data.prefix2_list_vrf = ['22.1.1.0','33.1.1.0','44.1.1.0'] +data.prefix_list_ipv6_vrf = ['1111:1::','1222:1::','1333:1::'] +data.prefix2_list_ipv6_vrf = ['2111:1::','2222:1::','2333:1::'] +data.af_ipv4 = "ipv4" +data.af_ipv6 = "ipv6" +data.shell_sonic = "sonic" +data.shell_vtysh = "vtysh" +data.num_of_routes = 127000 +data.num_of_routes2 = 49900 +data.num_of_routes_ipv6 = 24990 +data.num_of_routes_ipv6_2 = 12490 +data.num_of_routes3 = 25000 + +data.num_of_routes_ipv4_004 = 22990 +data.num_of_routes_ipv6_004 = 17990 + +data.vrf_name = ['Vrf'+'%s'%x for x in range (101,201)] +data.dut1_tg1_vlan = ['%s'%x for x in range (401,404)] +data.dut1_tg1_vlan2 = ['%s'%x for x in range (420,424)] +data.dut2_tg1_vlan = ['%s'%x for x in range (406,409)] +#DUT and TG IPs +data.dut1_dut2_vrf_ip = ['3.0.%s.1'%x for x in range(101,201)] +data.dut2_dut1_vrf_ip = ['3.0.%s.2'%x for x in range(101,201)] +data.dut1_dut2_vrf_ip_subnet = '24' +data.dut2_dut1_vrf_ip_subnet = '24' + +data.dut1_dut2_vrf_ipv6 = ['3:%s::1'%x for x in range(101,201)] 
+data.dut2_dut1_vrf_ipv6 = ['3:%s::2'%x for x in range(101,201)] +data.dut1_dut2_vrf_ipv6_subnet = '64' +data.dut2_dut1_vrf_ipv6_subnet = '64' + +data.dut1_tg1_vrf_ip = ['1.0.%s.1'%x for x in range (1,4)] +data.tg1_dut1_vrf_ip = ['1.0.%s.2'%x for x in range (1,4)] +data.dut1_tg1_vrf_ip_subnet = '24' +data.tg1_dut1_vrf_ip_subnet = '24' + +data.dut1_tg1_vrf_ipv6 = ['1:%s::1'%x for x in range (1,4)] +data.tg1_dut1_vrf_ipv6 = ['1:%s::2'%x for x in range (1,4)] +data.dut1_tg1_vrf_ipv6_subnet = '64' +data.tg1_dut1_vrf_ipv6_subnet = '64' + +data.dut1_tg1_vrf_ipv6_2 = ['22:%s::1'%x for x in range (1,4)] +data.tg1_dut1_vrf_ipv6_2 = ['22:%s::2'%x for x in range (1,4)] + +data.dut2_tg1_vrf_ipv6_2 = ['33:%s::1'%x for x in range (1,4)] +data.tg1_dut2_vrf_ipv6_2 = ['33:%s::2'%x for x in range (1,4)] + +data.dut2_tg1_vrf_ip = ['6.0.%s.1'%x for x in range (6,9)] +data.tg1_dut2_vrf_ip = ['6.0.%s.2'%x for x in range (6,9)] +data.dut2_tg1_vrf_ip_subnet = '24' +data.tg1_dut2_vrf_ip_subnet = '24' + +data.dut2_tg1_vrf_ipv6 = ['6:%s::1'%x for x in range (6,9)] +data.tg1_dut2_vrf_ipv6 = ['6:%s::2'%x for x in range (6,9)] +data.dut2_tg1_vrf_ipv6_subnet = '64' +data.tg1_dut2_vrf_ipv6_subnet = '64' +data.src_mac_list = ['00:0a:01:00:00:01','00:0b:01:00:00:01','00:0c:01:00:00:01'] +data.src_mac_list2 = ['00:0d:01:00:00:01','00:0e:01:00:00:01','00:0f:01:00:00:01'] +data.retry_count = 8 +data.delay = 15 +#BGP dut1 parameters +data.dut1_as = ['%s'%x for x in range (101,201)] +data.dut1_router_id = '1.1.1.1' +data.dut1_keepalive = '60' +data.dut1_holddown = '180' +data.dut1_ip_peergroup = 'ip_peergroup' +data.dut1_ipv6_peergroup = 'ipv6_peergroup' + +#BGP dut2 parameters +data.dut1_as = ['%s'%x for x in range (101,201)] +data.dut2_as = ['%s'%x for x in range (101,201)] +data.dut2_router_id = '2.2.2.2' +data.dut2_keepalive = '60' +data.dut2_holddown = '180' +data.dut2_ip_peergroup = 'ip_peergroup' +data.dut2_ipv6_peergroup = 'ipv6_peergroup' + +#BGP TG parameters +data.dut1_tg_as = 
['300','301','302'] +data.dut2_tg_as = ['400','401','402'] + +#DUT1 traffic parameters +data.dut1_hosts = '100' +data.tg_dut1_rate_pps = '2000' +data.tg_dut1_p1_v4_routes = '500' +data.tg_dut1_p1_v4_prefix = '10.0.0.1' +data.tg_dut1_p1_v6_routes = '500' +data.tg_dut1_p1_v6_prefix = '10::1' + +#DUT2 traffic parameters +data.dut2_hosts = '100' +data.tg_dut2_rate_pps = '2000' +data.tg_dut2_p1_v4_routes = '500' +data.tg_dut2_p1_v4_route_prefix = '11.0.0.1' +data.tg_dut2_p1_v6_routes = '500' +data.tg_dut2_p1_v6_route_prefix = '11::1' + +data.gw_ipv6 = '2001:1::1' +data.src_ipv6 = '2001:1::2' + +data.dut1_dut2_vlan = ['%s'%x for x in range (100,110)] +data.dut2_dut1_vlan = ['%s'%x for x in range (100,110)] + +data.max_vrf = 941 +data.max_vlans = 941 +data.vrf_name2 = ['Vrf'+'%s'%x for x in range (1,941)] +data.wait_time = 350 #400 #130 for 400 intf , 350 for full scl +data.wait_time2 = 2220 #2300 # 2220 for 1000 intf +data.dut1_vlan_scl = ['%s'%x for x in range (1,941)] +''' +##Overwriting variable - remove later ### +data.max_vrf = 100 +data.max_vlans = 100 +data.vrf_name2 = ['Vrf'+'%s'%x for x in range (1,100)] +data.wait_time = 100 #400 #130 for 400 intf , 350 for full scl +data.wait_time2 = 400 #2300 # 2220 for 1000 intf +data.dut1_vlan_scl = ['%s'%x for x in range (1,100)] +''' +data.dut1_vlan_scl_ip = '8.0.0.1' +data.dut2_vlan_scl_ip = '8.0.0.2' +data.dut1_vlan_scl_ipv6 = '8000:1::' +data.tg1_vlan_scl_ip = '8.0.0.100' + +data.dut1_vlan_ecmp = ['%s'%x for x in range (101,230)] + +data.src_ip = ['8.%s.1.100'%x for x in range (0,5)] +data.dst_ip = ['8.%s.1.1'%x for x in range (0,5)] + +data.dut1_ecmp_ip = ['9.1.%s.1'%x for x in range (1,129)] +data.dut2_ecmp_ip = ['9.1.%s.2'%x for x in range (1,129)] +data.dut1_ecmp_ipv6 = ['9000:%s::1'%x for x in range (1,129)] +data.dut2_ecmp_ipv6 = ['9000:%s::2'%x for x in range (1,129)] + +data.max_ecmp_static = 10 +data.ipv4_scale_static = 10 +data.ipv6_scale_static = 10 +data.num_of_vrfs = 1 + +data.max_ecmp = 64 
+data.max_ecmp_bgp = 64 +data.ipv4_scale_ecmp = 256 +data.ipv6_scale_ecmp = 256 +data.random_number = 6 + +TH2 = SpyTestDict() +TH2.ipv4_scale = 196000 +TH2.ipv6_scale = 32000 +TH2.ipv6_scale_abv_64 = 25000 +TH2.ipv4_scale_ipv4ipv6 = 80000 +TH2.ipv6_scale_ipv4ipv6 = 12000 +TH2.wait_time = 140 +TH2.delay_time = 10 +TH2.retry_time = 11 +TH2.perf_time = 5 +TH2.max_arp_count = 32000 +TH2.max_nd_count = 16000 + +TH = SpyTestDict() +TH.ipv4_scale = 65000 +TH.ipv6_scale = 24000 +TH.ipv6_scale_abv_64 = 14000 +TH.ipv4_scale_ipv4ipv6 = 24000 +TH.ipv6_scale_ipv4ipv6 = 10000 +TH.wait_time = 100 +TH.delay_time = 10 +TH.retry_time = 6 +TH.perf_time = 6 +TH.max_arp_count = 32000 +TH.max_nd_count = 16000 + +TD3 = SpyTestDict() +#TD3.ipv4_scale = 81000 +TD3.ipv4_scale = 128000 +#TD3.ipv6_scale = 25600 +TD3.ipv6_scale = 64000 +TD3.ipv6_scale_abv_64 = 25000 +TD3.ipv4_scale_ipv4ipv6 = 24000 +TD3.ipv6_scale_ipv4ipv6 = 10000 +TD3.wait_time = 120 +TD3.delay_time = 7 +TD3.retry_time = 8 +TD3.perf_time = 3 +TD3.max_arp_count = 32000 +TD3.max_nd_count = 16000 + +##### TD2 to be verified ######### +TD2 = SpyTestDict() +TD2.ipv4_scale = 65000 +TD2.ipv6_scale = 24000 +TD2.ipv6_scale_abv_64 = 14000 +TD2.ipv4_scale_ipv4ipv6 = 24000 +TD2.ipv6_scale_ipv4ipv6 = 10000 +TD2.wait_time = 100 +TD2.delay_time = 10 +TD2.retry_time = 6 +TD2.perf_time = 6 +TD2.max_arp_count = 32000 +TD2.max_nd_count = 16000 + +############ default ############## +ipv4_scale = 65000 +ipv6_scale = 24000 +ipv6_scale_abv_64 = 14000 +ipv4_scale_ipv4ipv6 = 24000 +ipv6_scale_ipv4ipv6 = 10000 +wait_time = 100 +delay_time = 10 +retry_time = 6 +perf_time = 6 +max_arp_count = 32000 +max_nd_count = 16000 + diff --git a/spytest/tests/routing/NAT/test_nat.py b/spytest/tests/routing/NAT/test_nat.py new file mode 100644 index 00000000000..af82c52990f --- /dev/null +++ b/spytest/tests/routing/NAT/test_nat.py @@ -0,0 +1,863 @@ +# ################ Author Details ################ +# Name: Kesava Swamy Karedla ; Kiran Vedula +# Email: 
kesava-swamy.karedla@broadcom.com ; kiran-kumar.vedula@broadcom.com
+# ################################################
+#
+# 1. test_ft_static_nat - Verify static NAT establishes a one-to-one mapping between the inside local address and an
+# inside global address.
+# 2. test_ft_static_nat_snat - Verify static NAT establishes a one-to-one mapping between the inside local address and
+# an inside global address with nat type as snat
+# 3. test_ft_static_napt - Verify static NAPT functionality for TCP traffic
+# 4. test_ft_static_napt_snat - Verify static NAPT functionality for UDP traffic with nat type as snat
+# 5. test_ft_static_napt_entry_remove_reapply - Verify static NAPT functionality after NAT entries are removed
+# and re-applied
+# 6. test_ft_static_napt_same_zone - Verify that if zones are same traffic should get forwarded as per L3 table
+# 7. test_ft_static_twicenat - Verify static twicenat functionality
+# 8. test_ft_dynamic_napt_without_acl_bind_udp - Verify dynamic NAT establishes a mapping between an inside local
+# address and an inside global address dynamically selected from a pool of global addresses. Also verifies udp entry
+# time out.
+# 9. test_ft_nat_docker_restart - Verify nat translation table after nat docker restart
+# 10. 
test_ft_dynamic_nat - Verify basic dynamic nat translation +# +################################################### + +import pytest + +from socket import inet_aton +from binascii import hexlify + +from spytest import st, tgapi, SpyTestDict + +import apis.routing.ip as ipapi +import apis.routing.nat as natapi +import apis.routing.arp as arpapi +import apis.switching.vlan as vlanapi +import apis.system.basic as basicapi +import apis.system.interface as intfapi + +from utilities.parallel import exec_all, ensure_no_exception + +data = SpyTestDict() +dut1_rt_int_mac = None + + +def nat_initialize_variables(): + data.in1_ip_addr = "12.12.0.1" + data.in1_ip_addr_h = ["12.12.0.2", "12.12.0.3", "12.12.0.4","12.12.0.5", "12.12.0.6", "12.12.0.7","12.12.0.8", + "12.12.0.9", "12.12.0.10", "12.12.0.11"] + data.in1_ip_addr_rt = "12.12.0.0" + data.in1_ip_addr_mask = "16" + data.in2_ip_addr = "13.13.13.1" + data.in2_ip_addr_h = ["13.13.13.2", "13.13.13.3", "13.13.13.4"] + data.in2_ip_addr_rt = "13.13.13.0" + data.in2_ip_addr_mask = "16" + data.out_ip_addr = "125.56.90.11" + data.out_ip_addr_l = ["125.56.90.12", "125.56.90.13", "125.56.90.14", "125.56.90.15"] + data.out_ip_addr_h = "125.56.90.1" + data.out_ip_range = "125.56.90.23-125.56.90.24" + data.out_ip_pool = ["125.56.90.23", "125.56.90.24"] + data.out_ip_addr_rt = "125.56.90.0" + data.out_ip_addr_mask = "24" + data.global_ip_addr_h = "129.2.30.13" + data.global_ip_addr = "129.2.30.12" + data.global_ip_addr_rt = "129.2.30.0" + data.global_ip_addr_mask = "24" + data.tw_global_ip_addr = "99.99.99.1" + data.tw_global_ip_addr_rt = "99.99.99.0" + data.tw_global_ip_addr_mask = "24" + data.test_ip_addr = "22.22.22.1" + data.test_ip_addr_mask = "16" + data.test_ip_addr_rt = "22.22.0.0" + data.tw_test_ip_addr = "15.15.0.1" + data.tw_test_ip_addr_mask = "16" + data.tw_test_ip_addr_rt = "15.15.0.0" + data.s_local_ip = "11.11.11.2" + data.s_local_ip_route = "11.11.0.0" + data.s_local_ip_mask = "16" + data.s_global_ip = "88.98.128.2" + 
data.s_global_ip_rt = "88.98.128.0" + data.s_global_ip_mask = "24" + data.proto_all = "all" + data.proto_tcp = "tcp" + data.proto_udp = "udp" + data.zone_1 = "0" + data.zone_2 = "1" + data.zone_3 = "2" + data.zone_4 = "3" + data.pool_name = ["pool_123_nat", "88912_pool", "123Pool"] + data.bind_name = ["bind_1", "7812_bind", "bind11"] + data.global_port_range = "333-334" + data.local_src_port = ["251", "252"] + data.local_dst_port = ["444", "8991"] + data.global_src_port = ["12001", "7781"] + data.global_dst_port = ["333", "334"] + data.tcp_src_local_port = 1002 + data.tcp_dst_local_port = 3345 + data.udp_src_local_port = 7781 + data.udp_dst_local_port = 8812 + data.tcp_src_global_port = 100 + data.tcp_dst_global_port = 345 + data.udp_src_global_port = 7811 + data.udp_dst_global_port = 5516 + data.af_ipv4 = "ipv4" + data.nat_type_snat = "snat" + data.nat_type_dnat = "dnat" + data.shell_sonic = "sonic" + data.shell_vtysh = "vtysh" + data.rate_traffic = '10' + data.pkt_count = '10' + data.host_mask = '32' + data.packet_forward_action = 'FORWARD' + data.packet_do_not_nat_action = 'DO_NOT_NAT' + data.packet_drop_action = 'DROP' + data.stage_Ing = 'INGRESS' + data.stage_Egr = 'EGRESS' + data.acl_table_nat = 'NAT_ACL' + data.acl_table_in_nat_eg = 'in_nat_eg' + data.acl_table_out_nat_eg = 'out_nat_eg' + data.acl_table_nat = 'NAT_ACL' + data.type = 'L3' + data.acl_drop_all_rule = 'INGRESS_FORWARD_L3_DROP_ALL_RULE' + data.ipv4_type = 'ipv4any' + data.tg1_src_mac_addr = '00:00:23:11:14:08' + data.tg2_src_mac_addr = '00:00:43:32:1A:01' + data.wait_time_traffic_run_to_pkt_cap = 1 + data.wait_time_traffic_run = 1 + data.wait_nat_udp_timeout = 60 + data.wait_nat_stats = 7 + data.config_add='add' + data.config_del='del' + data.twice_nat_id_1 = '100' + data.twice_nat_id_2 = '1100' + data.wait_time_after_docker_restart = 10 + data.mask = '32' + + if not st.is_feature_supported("klish"): + data.natcli_type = "click" + data.nat_pkt_cap_enable = True + else: + data.natcli_type = 
"klish" + data.nat_pkt_cap_enable = False + + if st.is_vsonic(): + data.nat_pkt_cap_enable = True + data.wait_nat_stats = data.wait_nat_stats + 10 + + +@pytest.fixture(scope="module", autouse=True) +def nat_module_config(request): + nat_initialize_variables() + nat_prolog() + yield + nat_epilog() + + +@pytest.fixture(scope="function", autouse=True) +def nat_func_hooks(request): + intfapi.clear_interface_counters(vars.D1) + natapi.clear_nat(vars.D1, statistics=True) + yield + if st.get_func_name(request) == 'test_ft_dynamic_napt_without_acl_bind_udp': + natapi.config_nat_timeout(vars.D1, udp_timeout=300, config='set') + + +@pytest.mark.nat_regression +def test_ft_static_nat(): + # ################################################ + # Objective - Verify static NAT establishes a one-to-one mapping between the inside local address and an + # inside global address. + # ################################################# + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"]) + st.wait(data.wait_nat_stats) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0]) + if not nat_stats: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[0], data.out_ip_addr_l[0]) + if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[0],data.out_ip_addr_l[0]) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_addr_l[0]) + if not nat_stats: + 
st.error("Received empty list, nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0]) + if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0]) + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[0]) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"], offset_list=[26], value_list=[s_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[0],data.out_ip_addr_l[0]) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[0]) + if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"], offset_list=[30], value_list=[d_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[0], data.in1_ip_addr_h[0]) + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_nat_snat(): + # ################################################ + # Objective - Verify static NAT establishes a one-to-one mapping between the inside local address and an + # inside global address with nat type as snat + # ################################################# + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"]) + st.wait(data.wait_nat_stats) + result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, 
src_ip=data.s_global_ip) + if not result: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.s_global_ip, data.s_local_ip) + if not int(result[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.s_global_ip,data.s_local_ip) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.s_local_ip) + if not nat_stats: + st.error("Received empty list, nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip) + if not int(nat_stats[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip) + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.s_local_ip) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_snat_data_str_id_1"], offset_list=[26], value_list=[s_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.s_global_ip, data.s_local_ip) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.s_global_ip) + if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_snat_data_str_id_1"], offset_list=[30], value_list=[d_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.s_local_ip,data.s_global_ip) + + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_napt(): + # ################################################ + # Objective - Verify static NAPT functionality for TCP traffic + # ################################################# + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', 
handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"]) + st.wait(data.wait_nat_stats) + result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, src_ip=data.in1_ip_addr_h[1], src_ip_port= data.tcp_src_local_port) + if not result: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[1], data.out_ip_addr_l[1]) + if not (int(result[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1]) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, dst_ip=data.out_ip_addr_l[1], dst_ip_port=data.tcp_src_global_port) + if not nat_stats: + st.error("Received empty list, acl counters not updated") + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1]) + if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1]) + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[1]) + s_port_h = util_int_to_hexa_conv(data.tcp_src_global_port) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"], offset_list=[23, 26, 34], value_list=['06', s_ip_addr_h, s_port_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1]) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[1]) + d_port_h = util_int_to_hexa_conv(data.tcp_src_local_port) + 
if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"], offset_list=[23, 30, 36], value_list=['06', d_ip_addr_h, d_port_h]): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1]) + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_napt_snat(): + # ################################################ + # Objective - Verify static NAPT functionality for UDP traffic with nat type as snat + # ################################################# + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"]) + st.wait(data.wait_nat_stats) + result = natapi.poll_for_nat_statistics(vars.D1, protocol=(data.proto_udp), src_ip=data.in1_ip_addr_h[2], src_ip_port=data.udp_src_local_port) + if not result: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2]) + if not int(result[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.out_ip_addr_l[2],data.in1_ip_addr_h[2]) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=data.out_ip_addr_l[2], dst_ip_port=data.udp_src_global_port) + if not nat_stats: + st.error("Received empty list, nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[0]) + if not int(nat_stats[0]['packets']) == (int(data.pkt_count)): + 
st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[0]) + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[2]) + s_port_h = util_int_to_hexa_conv(data.udp_src_global_port) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"], offset_list=[23, 26, 34], value_list=['11', s_ip_addr_h, s_port_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2]) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[2]) + d_port_h = util_int_to_hexa_conv(data.udp_src_local_port) + if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"], offset_list=[23, 30, 36], value_list=['11', d_ip_addr_h, d_port_h]): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[0]) + + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_napt_entry_remove_reapply(): + # ################################################ + # Objective - Verify static NAPT functionality after NAT entries are removed and re applied + # ################################################# + natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1], + local_ip=data.in1_ip_addr_h[1], + local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port, + config=data.config_del, nat_type=data.nat_type_dnat) + natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1], + local_ip=data.in1_ip_addr_h[1], + local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port, + config=data.config_add, nat_type=data.nat_type_dnat) + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"]) + 
tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"]) + st.wait(data.wait_nat_stats) + result = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, src_ip=data.in1_ip_addr_h[1], src_ip_port=data.tcp_src_local_port) + if not result: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[1], data.out_ip_addr_l[1]) + if not (int(result[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet",data.in1_ip_addr_h[1],data.out_ip_addr_l[1]) + nat_stats = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_tcp, dst_ip=data.out_ip_addr_l[1], dst_ip_port=data.tcp_src_global_port) + if not nat_stats: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1]) + if not (int(nat_stats[0]['packets']) == (int(data.pkt_count))): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.out_ip_addr_l[1], data.in1_ip_addr_h[1]) + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_napt_same_zone(): + # ################################################ + # Objective - Verify that if zones are same traffic should get forwarded as per L3 table. 
+ # ################################################# + natapi.config_nat_interface(vars.D1, interface_name=vars.D1T1P2, zone_value=data.zone_1, config=data.config_add) + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_dnat_data_str_id_1"]) + natapi.config_nat_interface(vars.D1, interface_name=vars.D1T1P2, zone_value=data.zone_2, config=data.config_add) + st.wait(data.wait_nat_stats) + nat_stats = natapi.get_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_addr_l[0]) + if not nat_stats: + st.error("Received empty list, nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("nat_translation_happening_in_no_nat_case") + if int(nat_stats[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("nat_translation_happening_in_no_nat_case") + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_static_twicenat(): + # ################################################ + # Objective - Verify static twicenat functionality + # ################################################# + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"]) + natapi.show_nat_translations(vars.D1) + nat_stats = natapi.poll_for_twice_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.tw_global_ip_addr, dst_ip=data.out_ip_addr_l[3]) + if 
not nat_stats: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("twicenat_translation_failed_in_packet") + if not int(nat_stats[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("twicenat_translation_failed_in_packet") + result = natapi.poll_for_twice_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[3], dst_ip=data.tw_test_ip_addr) + if not result: + st.error("Received empty list, nat statistics not updated") + util_nat_debug_fun() + st.report_fail("twicenat_translation_failed_in_packet") + if not int(result[0]['packets']) == (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("twicenat_translation_failed_in_packet") + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_addr_l[3]) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.tw_global_ip_addr) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_st_nat_twicenat_data_str_id_1"], offset_list=[26, 30], value_list=[s_ip_addr_h, d_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.out_ip_addr_l[2], data.in1_ip_addr_h[2]) + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.tw_test_ip_addr) + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[3]) + if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_st_nat_twicenat_data_str_id_1"], offset_list=[26, 30], value_list=[s_ip_addr_h, d_ip_addr_h]): + util_nat_debug_fun() + st.report_fail("dnat_translation_fail_in_packet", data.in1_ip_addr_h[2],data.out_ip_addr_l[0]) + + st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_dynamic_napt_without_acl_bind_udp(): + # ################################################ + # Objective - Verify dynamic NAT establishes a mapping between an inside local address and an inside global address + # dynamically selected from a pool of 
global addresses. Also verifies udp entry time out. + # ################################################# + natapi.config_nat_timeout(vars.D1, udp_timeout=120, config='set') + natapi.clear_nat(vars.D1, translations=True) + st.log("Traffic for snat learning via dynamic pool") + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + st.log("Traffic for data forwarding to check nat stats") + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1],src_ip_port=data.local_src_port[0]) + if not trn_val: + st.error("Received empty list,nat translation table not updated") + util_nat_debug_fun() + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + trn_src_ip = trn_val[0]["trn_src_ip"] + trn_src_port = trn_val[0]["trn_src_ip_port"] + ip_addr_h = util_ip_addr_to_hexa_conv(trn_src_ip) + port_h = util_int_to_hexa_conv(trn_src_port) + st.log("Sending traffic for dnat verification") + tg2_str_obj = tg2_str_selector(trn_src_ip, trn_src_port) + tg2.tg_traffic_control(action='run', handle=tg2_str_obj) + tg2.tg_traffic_control(action='stop', handle=tg2_str_obj) + # Show command for debugging purpose in case of failures. 
+ intfapi.show_interface_counters_all(vars.D1) + st.wait(data.wait_nat_stats) + nat_stats_s = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, + src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0]) + if not nat_stats_s: + st.error("Received empty list,nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + if not int(nat_stats_s[0]['packets']) >= (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + + nat_stats_d = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=trn_src_ip,dst_ip_port=trn_src_port) + if not nat_stats_d: + st.error("Received empty list, nat statistics are not updated") + util_nat_debug_fun() + st.report_fail("dynamic_dnat_translation_entry_create_fail",data.out_ip_pool[0],data.out_ip_pool[0]) + if not int(nat_stats_d[0]['packets']) >= (int(data.pkt_count)): + util_nat_debug_fun() + st.report_fail("dynamic_dnat_translation_entry_create_fail",data.out_ip_pool[0],data.out_ip_pool[0]) + st.log("Try deleting the pool") + if not natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range, + global_port_range= data.global_port_range, config=data.config_del, skip_error=True): + util_nat_debug_fun() + st.report_fail("pool_del_fail",data.pool_name[0]) + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"], + offset_list=[23, 26, 34], value_list=['11', ip_addr_h, port_h]): + util_nat_debug_fun() + st.report_fail("snat_translation_failed_in_packet", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[-1]) + port_h = util_int_to_hexa_conv(data.local_src_port[0]) + if not util_pkt_cap(tg2, tg1, 
tg_ph_2, tg_ph_1, tg2_str_obj, +                        offset_list=[23, 30, 36], value_list=['11', ip_addr_h, port_h]): +            util_nat_debug_fun() +            st.report_fail("dnat_translation_fail_in_packet", data.out_ip_pool[0],data.in1_ip_addr_h[-1]) + +    st.log("NAT UDP entry timeout verification") +    st.log("Waiting for UDP timeout") +    st.wait(180) +    trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], +                                          src_ip_port=data.local_src_port[0]) + +    if trn_val: +        st.error("Received non-empty list,nat translation entry exists") +        util_nat_debug_fun() +        st.report_fail("nat_translation_table_not_cleared") +    st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_nat_docker_restart(): +    # ################################################ +    # Objective - Verify nat translation table after nat docker restart +    # ################################################# + +    st.log("Sending traffic for dynamic NAT snat case") +    tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) +    tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) +    basicapi.docker_operation(vars.D1,"nat","stop") +    st.log("Wait for NAT docker to STOP") +    st.wait(data.wait_time_after_docker_restart) +    basicapi.docker_operation(vars.D1, "nat", "start") +    st.log("Wait for NAT docker to START") +    st.wait(data.wait_time_after_docker_restart) +    trn_val = natapi.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], +                                          src_ip_port=data.local_src_port[0]) +    if trn_val: +        util_nat_debug_fun() +        st.report_fail("dynamic_napt_entry_exists_after_docker_restart") +    trn_val_1 = natapi.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0]) +    if not trn_val_1: +        util_nat_debug_fun() +        st.report_fail("static_nat_entry_not_restored_after_docker_restart") +    st.report_pass("test_case_passed") + + +@pytest.mark.nat_regression +def test_ft_dynamic_nat(): 
+ # ################################################ + # Objective - Verify basic dynamic nat translation + # ################################################# + natapi.clear_nat(vars.D1, translations=True) + st.log("Deleting NAT Pool binding") + natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_del) + st.log("Creating NAT Pool-1 without port") + natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[1], global_ip_range=data.out_ip_pool[0], + config=data.config_add) + st.log("Creating NAT Pool binding") + natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_add) + st.log("Traffic for learning") + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"]) + st.log("Traffic for data forwarding to check nat stats") + tg1.tg_traffic_control(action='run', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"]) + st.wait(data.wait_nat_stats) + tc_fail_flag = 0 + result = natapi.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1]) + if not result: + st.error("Dynamic NAT failed for SRC IP") + tc_fail_flag = 1 + nat_stats_s = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all,src_ip=data.in1_ip_addr_h[-1]) + if not nat_stats_s: + tc_fail_flag = 1 + 
nat_stats_d = natapi.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_pool[0]) + if not nat_stats_d: + tc_fail_flag = 1 + + if data.nat_pkt_cap_enable: + st.log("Validation through Packet Capture") + s_ip_addr_h = util_ip_addr_to_hexa_conv(data.out_ip_pool[0]) + if not util_pkt_cap(tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data["tg1"]["tg1_dyn_nat_udp_data_str_id_1"], + offset_list=[26], value_list=[s_ip_addr_h]): + tc_fail_flag = 1 + d_ip_addr_h = util_ip_addr_to_hexa_conv(data.in1_ip_addr_h[-1]) + if not util_pkt_cap(tg2, tg1, tg_ph_2, tg_ph_1, tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"], + offset_list=[30], value_list=[d_ip_addr_h]): + tc_fail_flag = 1 + + natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_del) + natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range, + global_port_range= data.global_port_range, config=data.config_add) + natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_add) + if tc_fail_flag: + util_nat_debug_fun() + st.report_fail("dynamic_nat_fail") + st.report_pass("dynamic_nat_successful") + + +def nat_tg_config(): + global tg_handler, tg1, tg2, tg_ph_1, tg_ph_2, tg_str_data, tg_rt_int_handle + tg_handler = util_tg_init(vars, [vars.T1D1P1, vars.T1D1P2]) + tg1 = tg_handler["tg"] + tg2 = tg_handler["tg"] + tg_ph_1 = tg_handler["tg_ph_1"] + tg_ph_2 = tg_handler["tg_ph_2"] + tg_rt_int_handle = util_tg_routing_int_config(vars, tg1, tg2, tg_ph_1, tg_ph_2) + tg_str_data = util_tg_stream_config(tg1, tg2, tg_ph_1, tg_ph_2) + + +def nat_dut_config(): + global dut1_rt_int_mac + ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.in1_ip_addr, data.in1_ip_addr_mask, family=data.af_ipv4) + for i in range(0, len(data.out_ip_addr_l)): + ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.out_ip_addr_l[i], data.out_ip_addr_mask, + 
family=data.af_ipv4) + for i in range(0, len(data.out_ip_pool)): + ipapi.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.out_ip_pool[i], data.out_ip_addr_mask, + family=data.af_ipv4) + dut1_rt_int_mac = basicapi.get_ifconfig_ether(vars.D1, vars.D1T1P1) + ipapi.create_static_route(vars.D1, data.out_ip_addr_h, + "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask), + shell=data.shell_vtysh, family=data.af_ipv4) + ipapi.create_static_route(vars.D1, data.in1_ip_addr_h[0], + "{}/{}".format(data.s_global_ip_rt, data.s_global_ip_mask)) + ipapi.create_static_route(vars.D1, data.out_ip_addr_h, + "{}/{}".format(data.tw_global_ip_addr_rt, data.tw_global_ip_addr_mask)) + + st.log("NAT Configuration") + natapi.config_nat_feature(vars.D1, 'enable') + util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_add) + natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.out_ip_addr_l[0], + local_ip=data.in1_ip_addr_h[0], config=data.config_add, nat_type=data.nat_type_dnat) + natapi.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1], + local_ip=data.in1_ip_addr_h[1], + local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port, + config=data.config_add, nat_type=data.nat_type_dnat) + natapi.config_nat_static(vars.D1, protocol=data.proto_udp, global_ip=data.in1_ip_addr_h[2], + local_ip=data.out_ip_addr_l[2], + local_port_id=data.udp_src_global_port, global_port_id=data.udp_src_local_port, + config=data.config_add, nat_type=data.nat_type_snat) + natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.s_global_ip, local_ip=data.s_local_ip, + config=data.config_add, nat_type=data.nat_type_snat) + natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.out_ip_addr_l[3], + local_ip=data.in1_ip_addr_h[3], + config=data.config_add, nat_type=data.nat_type_dnat, twice_nat_id=data.twice_nat_id_1) + 
natapi.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.tw_global_ip_addr, + local_ip=data.tw_test_ip_addr, + config=data.config_add, nat_type=data.nat_type_snat, twice_nat_id=data.twice_nat_id_1) + natapi.show_nat_translations(vars.D1) + # dynamic NAT config + st.log("Creating NAT Pool-1") + natapi.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range, + global_port_range=data.global_port_range, config=data.config_add) + st.log("Creating NAT Pool binding") + natapi.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_add) + + +def nat_prolog(): + global vars + vars = st.ensure_min_topology("D1T1:2") + platform = basicapi.get_hwsku(vars.D1) + common_constants = st.get_datastore(vars.D1, "constants", "default") + if platform.lower() in common_constants['TH3_PLATFORMS']: + st.error("NAT is not supported for this platform {}".format(platform)) + st.report_unsupported('NAT_unsupported_platform',platform) + [_, exceptions] = exec_all(True, [[nat_tg_config], [nat_dut_config]], first_on_main=True) + ensure_no_exception(exceptions) + + +def nat_epilog(): + vars = st.get_testbed_vars() + util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_del) + natapi.clear_nat_config(vars.D1) + natapi.config_nat_feature(vars.D1, 'disable') + ipapi.delete_static_route(vars.D1, data.out_ip_addr_h, + "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask)) + ipapi.clear_ip_configuration(st.get_dut_names()) + vlanapi.clear_vlan_configuration(st.get_dut_names()) + if vars.config.module_epilog_tgen_cleanup: + st.log("Clearing Routing interface config on TG ports") + tg1.tg_interface_config(port_handle=tg_ph_1, handle=tg_rt_int_handle[0]['handle'], mode='destroy') + tg1.tg_interface_config(port_handle=tg_ph_2, handle=tg_rt_int_handle[1]['handle'], mode='destroy') + tgapi.traffic_action_control(tg_handler, actions=['reset']) + 
+ +def util_nat_zone_config(vars, intf, zone, config): + if config == data.config_add: + st.log("zone value configuration") + for i in range(len(intf)): + natapi.config_nat_interface(vars.D1, interface_name=intf[i], zone_value=zone[i], config=data.config_add) + else: + st.log("zone value un configuration") + for i in range(len(intf)): + natapi.config_nat_interface(vars.D1, interface_name=intf[i], zone_value=zone[i], config=data.config_del) + + return True + + +def util_tg_init(vars, tg_port_list): + tg_port_list = list(tg_port_list) if isinstance(tg_port_list, list) else [tg_port_list] + tg_handler = tgapi.get_handles(vars, tg_port_list) + return tg_handler + + +def util_tg_routing_int_config(vars, tg1, tg2, tg_ph_1, tg_ph_2): + st.log("TG1 {} IPv4 address {} config".format(vars.T1D1P1,data.in1_ip_addr_h[0])) + tg1_rt_int_handle = tg1.tg_interface_config(port_handle=tg_ph_1, mode='config', intf_ip_addr=data.in1_ip_addr_h[0], + gateway=data.in1_ip_addr, netmask='255.255.0.0', arp_send_req='1',count='10', gateway_step='0.0.0.0') + st.log("TG2 {} IPv4 address {} config".format(vars.T1D1P2,data.out_ip_addr_h)) + tg2_rt_int_handle = tg2.tg_interface_config(port_handle=tg_ph_2, mode='config', intf_ip_addr=data.out_ip_addr_h, + gateway=data.out_ip_addr_l[0], netmask='255.255.255.0', arp_send_req='1',count='10', gateway_step='0.0.0.0') + return tg1_rt_int_handle,tg2_rt_int_handle + + +def util_tg_stream_config(tg1, tg2, tg_ph_1, tg_ph_2): + result = {"tg1":{},"tg2":{}} + st.log("TG1 Stream config") + tg1_st_nat_dnat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4',mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[0],ip_dst_addr=data.global_ip_addr) + result["tg1"]["tg1_st_nat_dnat_data_str_id_1"] = tg1_st_nat_dnat_data_str['stream_id'] + tg1_st_napt_tcp_dnat_data_str = 
tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count,rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr, + mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[1], ip_dst_addr=data.global_ip_addr, l4_protocol='tcp', + tcp_src_port=data.tcp_src_local_port, tcp_dst_port=data.tcp_dst_local_port) + result["tg1"]["tg1_st_napt_tcp_dnat_data_str_id_1"] = tg1_st_napt_tcp_dnat_data_str['stream_id'] + tg1_st_nat_snat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count,rate_pps=data.rate_traffic, l3_protocol='ipv4', + mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac, + ip_src_addr=data.s_global_ip, ip_dst_addr=data.global_ip_addr) + result["tg1"]["tg1_st_nat_snat_data_str_id_1"] = tg1_st_nat_snat_data_str['stream_id'] + tg1_st_napt_udp_dnat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create',transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr, + mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[2], + ip_dst_addr=data.global_ip_addr, l4_protocol='udp', + udp_src_port=data.udp_src_local_port, + udp_dst_port=data.udp_dst_local_port) + result["tg1"]["tg1_st_napt_udp_dnat_data_str_id_1"] = tg1_st_napt_udp_dnat_data_str['stream_id'] + tg1_st_nat_twicenat_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='single_burst', pkts_per_burst=data.pkt_count, + rate_pps=data.rate_traffic, l3_protocol='ipv4', + mac_src=data.tg1_src_mac_addr, mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[3],ip_dst_addr=data.tw_test_ip_addr) + result["tg1"]["tg1_st_nat_twicenat_data_str_id_1"] = tg1_st_nat_twicenat_data_str['stream_id'] + tg1_dyn_nat_udp_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', + transmit_mode='single_burst', + 
pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr, + mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[-1], + ip_dst_addr=data.global_ip_addr, l4_protocol='udp', + udp_src_port=data.local_src_port[0], + udp_dst_port=data.local_dst_port[0]) + result["tg1"]["tg1_dyn_nat_udp_data_str_id_1"] = tg1_dyn_nat_udp_data_str['stream_id'] + + st.log("TG2 Stream config") + tg2_st_nat_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, l3_protocol='ipv4', + mac_src=data.tg2_src_mac_addr,mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_addr_l[0]) + result["tg2"]["tg2_st_nat_dnat_data_str_id_1"] = tg2_st_nat_dnat_data_str['stream_id'] + tg2_st_napt_tcp_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count,rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, ip_dst_addr=data.out_ip_addr_l[1], + l4_protocol='tcp',tcp_src_port=data.tcp_dst_global_port, tcp_dst_port=data.tcp_src_global_port) + result["tg2"]["tg2_st_napt_tcp_dnat_data_str_id_1"] = tg2_st_napt_tcp_dnat_data_str['stream_id'] + tg2_st_nat_snat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count,rate_pps=data.rate_traffic, l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr,ip_dst_addr=data.s_local_ip) + result["tg2"]["tg2_st_nat_snat_data_str_id_1"] = tg2_st_nat_snat_data_str['stream_id'] + tg2_st_napt_udp_dnat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', + pkts_per_burst=data.pkt_count,rate_pps=data.rate_traffic, l3_protocol='ipv4', 
mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, ip_dst_addr=data.out_ip_addr_l[2], + l4_protocol='udp',udp_src_port=data.udp_dst_global_port, udp_dst_port=data.udp_src_global_port) + result["tg2"]["tg2_st_napt_udp_dnat_data_str_id_1"] = tg2_st_napt_udp_dnat_data_str['stream_id'] + tg2_st_nat_twicenat_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', transmit_mode='single_burst', pkts_per_burst=data.pkt_count, + rate_pps=data.rate_traffic, l3_protocol='ipv4', + mac_src=data.tg2_src_mac_addr, mac_dst=dut1_rt_int_mac, + ip_src_addr=data.tw_global_ip_addr,ip_dst_addr=data.out_ip_addr_l[3]) + result["tg2"]["tg2_st_nat_twicenat_data_str_id_1"] = tg2_st_nat_twicenat_data_str['stream_id'] + tg2_dyn_nat_udp_1_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[0],l4_protocol='udp', udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[0]) + result["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"] = tg2_dyn_nat_udp_1_data_str['stream_id'] + tg2_dyn_nat_udp_2_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[0], l4_protocol='udp',udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[1]) + result["tg2"]["tg2_dyn_nat_udp_2_data_str_id_1"] = tg2_dyn_nat_udp_2_data_str['stream_id'] + tg2_dyn_nat_udp_3_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', 
mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[0]) + result["tg2"]["tg2_dyn_nat_udp_3_data_str_id_1"] = tg2_dyn_nat_udp_3_data_str['stream_id'] + tg2_dyn_nat_udp_4_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[1]) + result["tg2"]["tg2_dyn_nat_udp_4_data_str_id_1"] = tg2_dyn_nat_udp_4_data_str['stream_id'] + return result + + +def tg2_str_selector(trn_ip, trn_port): + ip1 = data.out_ip_pool[0] + ip2 = data.out_ip_pool[1] + p1 = data.global_dst_port[0] + p2 = data.global_dst_port[1] + s1 = tg_str_data["tg2"]["tg2_dyn_nat_udp_1_data_str_id_1"] + s2 = tg_str_data["tg2"]["tg2_dyn_nat_udp_2_data_str_id_1"] + s3 = tg_str_data["tg2"]["tg2_dyn_nat_udp_3_data_str_id_1"] + s4 = tg_str_data["tg2"]["tg2_dyn_nat_udp_4_data_str_id_1"] + tg2_stream_map = {s1: [ip1, p1], s2: [ip1, p2], s3: [ip2, p1], s4:[ip2, p2]} + for k, v in tg2_stream_map.items(): + if v ==[trn_ip, trn_port]: + return k + + +def util_pkt_cap(tg_tx, tg_rx, tx_p_h, rx_p_h, str_h, offset_list=[], value_list=[]): + tg_rx.tg_packet_control(port_handle=rx_p_h, action='start') + tg_tx.tg_traffic_control(action='run', handle=str_h) + tg_rx.tg_packet_control(port_handle=rx_p_h, action='stop') + tg_tx.tg_traffic_control(action='stop', handle=str_h) + pkt_cap = tg_rx.tg_packet_stats(port_handle=rx_p_h, format='var', output_type='hex') + st.log(pkt_cap) + pkt_cap_res = tgapi.validate_packet_capture(tg_type=tg_rx.tg_type, pkt_dict=pkt_cap, + offset_list=offset_list, value_list=value_list) + return 
pkt_cap_res + + +def util_ip_addr_to_hexa_conv(ipaddr): + ipaddr_h = hexlify(inet_aton(ipaddr)).upper() + return ipaddr_h + + +def util_int_to_hexa_conv(trn_src_port): + return (hex(int(trn_src_port)))[2:].zfill(4).upper() + + +def util_nat_debug_fun(): + st.banner("Collecting the needed debug info for failure analysis", width=100) + intfapi.show_interface_counters_all(vars.D1) + natapi.show_nat_translations(vars.D1) + ipapi.get_interface_ip_address(vars.D1) + arpapi.show_arp(vars.D1) diff --git a/spytest/tests/routing/NAT/test_nat_reboot_long_run.py b/spytest/tests/routing/NAT/test_nat_reboot_long_run.py new file mode 100644 index 00000000000..ce3bd63896a --- /dev/null +++ b/spytest/tests/routing/NAT/test_nat_reboot_long_run.py @@ -0,0 +1,611 @@ +import pytest + +from spytest import st, tgapi, SpyTestDict +from spytest.utils import random_vlan_list + +import apis.routing.ip as ip_obj +import apis.routing.nat as nat_obj +import apis.switching.vlan as vlan_obj +import apis.system.interface as intf_obj +import apis.qos.acl as acl_obj +import apis.system.basic as basic_obj +import apis.system.reboot as reboot_obj +import apis.routing.arp as arp_obj + +dut = dict() + +def nat_reboot_initialize_variables(): + global data + data = SpyTestDict() + data.in1_ip_addr = "12.12.0.1" + data.in1_ip_addr_h = ["12.12.0.2", "12.12.0.3", "12.12.0.4","12.12.0.5", "12.12.0.6", "12.12.0.7","12.12.0.8", + "12.12.0.9", "12.12.0.10", "12.12.0.11"] + data.in1_ip_addr_rt = "12.12.0.0" + data.in1_ip_addr_mask = "16" + data.in2_ip_addr = "13.13.13.1" + data.in2_ip_addr_h = ["13.13.13.2", "13.13.13.3", "13.13.13.4"] + data.in2_ip_addr_rt = "13.13.13.0" + data.in2_ip_addr_mask = "24" + data.in3_ip_addr = "23.1.0.1" + data.in3_ip_addr_h = ["23.1.0.2", "23.1.0.3", "23.1.0.4"] + data.in3_ip_addr_rt = "23.1.0.0" + data.in3_ip_addr_mask = "16" + data.out_ip_addr = "125.56.90.11" + data.out_ip_addr_l = ["125.56.90.12", "125.56.90.13", "125.56.90.14", "125.56.90.15", "125.56.90.16"] + 
data.out_ip_addr_h = "125.56.90.1" + data.out_ip_range = "125.56.90.23-125.56.90.24" + data.out_ip_pool = ["125.56.90.23", "125.56.90.24"] + data.out_ip_addr_rt = "125.56.90.0" + data.out_ip_addr_mask = "24" + data.out2_ip_addr = "85.16.0.48" + data.out2_ip_addr_l = ["85.16.0.49", "85.16.0.50", "85.16.0.51"] + data.out2_ip_addr_h = "85.16.0.49" + data.out2_ip_range = "85.16.0.30-85.16.0.50" + data.out2_ip_addr_rt = "85.16.0.0" + data.out2_ip_addr_mask = "16" + data.global_ip_addr_h = "129.2.30.13" + data.global_ip_addr = "129.2.30.12" + data.global_ip_addr_rt = "129.2.30.0" + data.global_ip_addr_mask = "24" + data.test_ip_addr = "22.22.22.1" + data.test_ip_addr_mask = "16" + data.test_ip_addr_rt = "22.22.0.0" + data.s_local_ip = "11.11.11.2" + data.s_local_ip_route = "11.11.0.0" + data.s_local_ip_mask = "16" + data.s_global_ip = "88.98.128.2" + data.s_global_ip_rt = "88.98.128.0" + data.s_global_ip_mask = "24" + data.proto_all = "all" + data.proto_tcp = "tcp" + data.proto_udp = "udp" + data.zone_1 = "0" + data.zone_2 = "1" + data.zone_3 = "2" + data.zone_4 = "3" + data.pool_name = ["pool_123_nat", "88912_pool", "123Pool"] + data.bind_name = ["bind_1", "7812_bind", "bind_11"] + data.global_port_range = "333-334" + data.local_src_port = ["251", "252"] + data.local_dst_port = ["444", "8991"] + data.global_src_port = ["12001", "7781"] + data.global_dst_port = ["333", "334"] + data.tcp_src_local_port = 1002 + data.tcp_dst_local_port = 3345 + data.udp_src_local_port = 7781 + data.udp_dst_local_port = 8812 + data.tcp_src_global_port = 100 + data.tcp_dst_global_port = 345 + data.udp_src_global_port = 7811 + data.udp_dst_global_port = 5516 + data.af_ipv4 = "ipv4" + data.nat_type_snat = "snat" + data.nat_type_dnat = "dnat" + data.shell_sonic = "sonic" + data.shell_vtysh = "vtysh" + data.vlan_list = random_vlan_list(4) + data.vlan_int_1 = "Vlan{}".format(str(data.vlan_list[0])) + data.vlan_int_2 = "Vlan{}".format(str(data.vlan_list[1])) + data.vlan_int_3 = 
"Vlan{}".format(str(data.vlan_list[2])) + data.vlan_int_4 = "Vlan{}".format(str(data.vlan_list[3])) + data.port_channel = "PortChannel100" + data.l2_source_mac = "00:00:00:EA:23:0F" + data.l2_destination_mac = "00:00:11:0A:45:33" + data.rate_pkt_cap = '5' + data.rate_traffic = '1500' + data.pkt_count = '1500' + data.host_mask = '32' + data.mask_2 = '24' + data.packet_forward_action = 'FORWARD' + data.packet_do_not_nat_action = 'DO_NOT_NAT' + data.packet_drop_action = 'DROP' + data.stage_Ing = 'INGRESS' + data.stage_Egr = 'EGRESS' + data.acl_table_nat = 'NAT_ACL' + data.acl_table_in_nat_eg = 'in_nat_eg' + data.acl_table_out_nat_eg = 'out_nat_eg' + data.acl_table_nat = 'NAT_ACL' + data.type = 'L3' + data.acl_drop_all_rule = 'INGRESS_FORWARD_L3_DROP_ALL_RULE' + data.ipv4_type = 'ipv4any' + data.tg1_src_mac_addr = '00:00:23:11:14:08' + data.tg2_src_mac_addr = '00:00:23:1B:14:07' + data.tg2_src_mac_addr = '00:00:43:32:1A:01' + data.wait_time_traffic_run_to_pkt_cap = 1 + data.wait_time_traffic_run = 1 + data.wait_time_after_reload = 30 + data.wait_time_after_reboot = 60 + data.wait_time_to_no_shut = 30 + data.wait_time_after_docker_restart = 30 + data.wait_nat_tcp_timeout = 60 + data.wait_nat_udp_timeout = 60 + data.wait_nat_stats = 7 + data.config_add='add' + data.config_del='del' + data.twice_nat_id_1 = '100' + data.twice_nat_id_2 = '1100' + data.wait_time_after_docker_restart = 10 + data.mask = '32' + data.max_nat_entries = "1024" + + + +@pytest.fixture(scope="module", autouse=True) +def nat_module_config(request): + nat_reboot_initialize_variables() + nat_pre_config() + yield + nat_post_config() + + +@pytest.fixture(scope="function") +def cmds_func_hooks(request): + yield + + +@pytest.mark.nat_longrun +def test_ft_nat_save_reboot(): + # ################ Author Details ################ + # Name: Kiran Vedula + # Eamil: kiran-kumar.vedula@broadcom.com + # ################################################ + # Objective - Verify dynamic NAPT translations after DUT reboot 
+ # ################################################# + nat_obj.clear_nat(vars.D1, translations=True) + nat_obj.clear_nat(vars.D1, statistics=True) + nat_obj.show_nat_translations(vars.D1) + st.log("Reboot the DUT") + reboot_obj.config_save(vars.D1, "sonic") + reboot_obj.config_save(vars.D1, "vtysh") + st.reboot(vars.D1) + st.log("Traffic for snat case") + tg1.tg_traffic_control(action='run', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + if not ip_obj.ping(vars.D1, data.in1_ip_addr_h[-1], family='ipv4',count=3): + nat_reboot_debug_fun() + st.report_fail("ping_fail",data.in1_ip_addr,data.in1_ip_addr_h[-1]) + st.wait(data.wait_nat_stats) + st.log("Checking for STATIC entries after reboot") + trn_val_1 = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0]) + if not trn_val_1: + nat_reboot_debug_fun() + st.report_fail("static_nat_translation_entry_create_fail", data.in1_ip_addr_h[0], data.out_ip_pool[0]) + count = data.pkt_count + trn_val = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], + src_ip_port=data.local_src_port[0]) + if not trn_val: + nat_reboot_debug_fun() + st.error("Received empty list,nat translation table not updated") + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + trn_src_ip = trn_val[0]["trn_src_ip"] + trn_src_port = trn_val[0]["trn_src_ip_port"] + st.log("Traffic for dnat case") + tg2_str_obj = tg2_str_selector(trn_src_ip, trn_src_port) + tg2.tg_traffic_control(action='run', handle=tg2_str_obj) + tg2.tg_traffic_control(action='stop', handle=tg2_str_obj) + st.wait(data.wait_nat_stats) + nat_stats_s = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, + src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0]) + if not nat_stats_s: + nat_reboot_debug_fun() + st.error("Received 
empty list,nat statistics are not updated") + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + if not (int(nat_stats_s[0]['packets']) >= (0.80 * (int(count)))): + nat_reboot_debug_fun() + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + + nat_stats_d = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=trn_src_ip, + dst_ip_port=trn_src_port) + if not nat_stats_d: + nat_reboot_debug_fun() + st.error("Received empty list, nat statistics are not updated") + st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], data.out_ip_pool[0]) + if not (int(nat_stats_d[0]['packets']) >= (0.80 * (int(count)))): + nat_reboot_debug_fun() + st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], data.out_ip_pool[0]) + st.report_pass("nat_translation_successful_after_reboot") + + +@pytest.mark.nat_longrun +def test_ft_nat_config_reload(): + # ################ Author Details ################ + # Name: Kiran Vedula + # Eamil: kiran-kumar.vedula@broadcom.com + # ################################################ + # Objective - Verify dynamic NAPT translations after config save and reload + # ################################################# + nat_obj.clear_nat(vars.D1, translations=True) + nat_obj.clear_nat(vars.D1, statistics=True) + st.log("Config reload the DUT") + reboot_obj.config_save_reload(vars.D1) + st.log("Get some debug info after config reload is complete") + ip_obj.show_ip_route(vars.D1) + arp_obj.show_arp(vars.D1) + nat_obj.show_nat_translations(vars.D1) + st.wait(2) + st.log("Traffic for snat case") + tg1.tg_traffic_control(action='run', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + st.wait(data.wait_nat_stats) + trn_val_1 = nat_obj.get_nat_translations(vars.D1, 
protocol=data.proto_all, src_ip=data.in1_ip_addr_h[0]) + if not trn_val_1: + nat_reboot_debug_fun() + st.report_fail("nat_translation_table_entry_deleted_incorrectly") + count = data.pkt_count + trn_val = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_udp, src_ip=data.in1_ip_addr_h[-1], + src_ip_port=data.local_src_port[0]) + if not trn_val: + nat_reboot_debug_fun() + st.error("Received empty list,nat translation table not updated") + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + trn_src_ip = trn_val[0]["trn_src_ip"] + trn_src_port = trn_val[0]["trn_src_ip_port"] + st.log("Traffic for dnat case") + tg2_str_obj = tg2_str_selector(trn_src_ip, trn_src_port) + tg2.tg_traffic_control(action='run', handle=tg2_str_obj) + tg2.tg_traffic_control(action='stop', handle=tg2_str_obj) + st.wait(data.wait_nat_stats) + nat_stats_s = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, + src_ip=data.in1_ip_addr_h[-1], src_ip_port=data.local_src_port[0]) + if not nat_stats_s: + nat_reboot_debug_fun() + st.error("Received empty list,nat statistics are not updated") + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + if not int(nat_stats_s[0]['packets']) >= (0.80 * (int(count))): + nat_reboot_debug_fun() + st.report_fail("dynamic_snat_translation_entry_create_fail", data.in1_ip_addr_h[-1], data.out_ip_pool[0]) + + nat_stats_d = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_udp, dst_ip=trn_src_ip, + dst_ip_port=trn_src_port) + if not nat_stats_d: + nat_reboot_debug_fun() + st.error("Received empty list, nat statistics are not updated") + st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], data.out_ip_pool[0]) + if not int(nat_stats_d[0]['packets']) >= (0.80 * (int(count))): + nat_reboot_debug_fun() + st.report_fail("dynamic_dnat_translation_entry_create_fail", data.out_ip_pool[0], 
data.out_ip_pool[0]) + st.report_pass("nat_translation_successful_after_config_reload") + + +@pytest.mark.nat_longrun +def test_ft_dynamic_nat_timeout(): + # ################ Author Details ################ + # Name: Kiran Vedula + # Email: kiran-kumar.vedula@broadcom.com + # ################################################ + # Objective - Verify dynamic NAT translations age out after the configured NAT timeout + # ################################################# + nat_obj.clear_nat(vars.D1, translations=True) + nat_obj.clear_nat(vars.D1, statistics=True) + nat_obj.config_nat_timeout(vars.D1, timeout=300, config='set') + st.log("Deleting NAT Pool binding") + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_del) + st.log("Creating NAT Pool-1 without port") + nat_obj.config_nat_pool(vars.D1, pool_name=data.pool_name[1], global_ip_range=data.out_ip_pool[0], + config=data.config_add) + st.log("Creating NAT Pool binding") + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_add) + st.log("Traffic for Dynamic NAT case") + tg1.tg_traffic_control(action='run', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + tg1.tg_traffic_control(action='stop', handle=tg_str_data[1]["tg1_dyn_nat_udp_data_str_id_1"]) + tg2.tg_traffic_control(action='run', handle=tg_str_data[2]["tg2_dyn_nat_udp_1_data_str_id_1"]) + tg2.tg_traffic_control(action='stop', handle=tg_str_data[2]["tg2_dyn_nat_udp_1_data_str_id_1"]) + tc_fail_flag = 0 + st.wait(data.wait_nat_stats) + result = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1]) + if not result: + nat_reboot_debug_fun() + st.error("Dynamic NAT failed for SRC IP") + tc_fail_flag = 1 + nat_stats_s = nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1]) + if not nat_stats_s: + nat_reboot_debug_fun() + tc_fail_flag = 1 + nat_stats_d = 
nat_obj.poll_for_nat_statistics(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_pool[0]) + if not nat_stats_d: + nat_reboot_debug_fun() + tc_fail_flag = 1 + st.log("Waiting for NAT global timeout") + st.wait(350) + result = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_all, src_ip=data.in1_ip_addr_h[-1]) + if result: + nat_reboot_debug_fun() + st.error("Dynamic NAT SRC translations present after timeout") + tc_fail_flag = 1 + result = nat_obj.get_nat_translations(vars.D1, protocol=data.proto_all, dst_ip=data.out_ip_pool[0]) + if result: + nat_reboot_debug_fun() + st.error("Dynamic NAT DST translations present after timeout") + tc_fail_flag = 1 + st.log("Reversing the Module config") + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_del) + nat_obj.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range, + global_port_range=data.global_port_range, config=data.config_add) + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_add) + nat_obj.config_nat_timeout(vars.D1, timeout=600, config='set') + if tc_fail_flag: + nat_reboot_debug_fun() + st.report_fail("nat_translation_table_not_cleared") + st.report_pass("nat_translation_global_timeout_verified") + + +@pytest.mark.nat_longrun +def test_ft_dynamic_nat_warmboot(): + # ################ Author Details ################ + # Name: Kesava Swamy Karedla + # Eamil: kesava-swamy.karedla@broadcom.com + # ################################################ + # Objective - FtOpSoRoNatWb001 - Verify warm boot with dynamic nat scaling entries. 
+ # ################################################# + result_flag=0 + platform = basic_obj.get_hwsku(vars.D1) + common_constants = st.get_datastore(vars.D1, "constants", "default") + if not platform.lower() in common_constants['WARM_REBOOT_SUPPORTED_PLATFORMS']: + st.error("Warm-Reboot is not supported for this platform {}".format(platform)) + st.report_unsupported('test_case_unsupported') + nat_obj.clear_nat(vars.D1, translations=True) + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_del) + nat_obj.config_nat_pool_binding(vars.D1, binding_name="scale_bind", pool_name="scale_pool", + acl_name=data.acl_table_in_nat_eg, config=data.config_add) + st.log("Sending continuous traffic at 600 pps for the max dynamic nat entries to get learned") + tg1.tg_traffic_control(action='run', handle=tg_str_data[1]["tg1_scale_nat_udp_data_str_id_1"]) + st.log("Waiting for traffic to run, such that max nat entries get learned") + if not util_check_nat_translations_count(vars.D1,20,data.max_nat_entries): + nat_reboot_debug_fun() + st.log("Failed to learn max nat entries") + result_flag = 1 + tg1.tg_traffic_control(action='stop', handle=tg_str_data[1]["tg1_scale_nat_udp_data_str_id_1"]) + # Show command for debugging purpose in case of failures. 
+ intf_obj.show_interface_counters_all(vars.D1) + st.log("Warm boot verification") + tgapi.traffic_action_control(tg_handler, actions=['clear_stats']) + tg1.tg_traffic_control(action='run', handle=tg_str_data[1]["tg1_scale_nat_udp_data_str_id_1"]) + st.log("Performing warm-reboot, while traffic is forwarding for nat entries") + st.reboot(vars.D1, 'warm') + tg1.tg_traffic_control(action='stop', handle=tg_str_data[1]["tg1_scale_nat_udp_data_str_id_1"]) + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg1], + 'exp_ratio': [1], + 'rx_ports': [vars.T1D1P2], + 'rx_obj': [tg2], + } + } + + filter_result = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='aggregate', comp_type='packet_count') + if not filter_result: + nat_reboot_debug_fun() + st.log("Traffic loss observed for the SNAT traffic during warm-boot") + result_flag = 1 + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_add) + nat_obj.config_nat_pool_binding(vars.D1, binding_name="scale_bind", pool_name="scale_pool", + acl_name=data.acl_table_in_nat_eg, config=data.config_del) + if result_flag: + st.report_fail("nat_warm_reboot_failed") + st.report_pass("test_case_passed") + + +def nat_pre_config(): + global vars + vars = st.ensure_min_topology("D1T1:2") + platform = basic_obj.get_hwsku(vars.D1) + common_constants = st.get_datastore(vars.D1, "constants", "default") + if platform.lower() in common_constants['TH3_PLATFORMS']: + st.error("NAT is not supported for this platform {}".format(platform)) + st.report_unsupported('NAT_unsupported_platform',platform) + global tg_handler, tg1, tg2, tg_ph_1, tg_ph_2, dut1_rt_int_mac, tg_str_data, tg_rt_int_handle + tg_handler = util_tg_init(vars, [vars.T1D1P1, vars.T1D1P2]) + tg1 = tg_handler["tg"] + tg2 = tg_handler["tg"] + tg_ph_1 = tg_handler["tg_ph_1"] + tg_ph_2 = tg_handler["tg_ph_2"] + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.in1_ip_addr, 
data.in1_ip_addr_mask, family=data.af_ipv4) + ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.out_ip_addr_l[0], data.out_ip_addr_mask, family=data.af_ipv4) + dut1_rt_int_mac = basic_obj.get_ifconfig_ether(vars.D1, vars.D1T1P1) + ip_obj.create_static_route(vars.D1, data.out_ip_addr_h, + "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask), + shell=data.shell_vtysh, family=data.af_ipv4) + ip_obj.create_static_route(vars.D1, data.in1_ip_addr_h[0], "{}/{}".format(data.s_global_ip_rt, data.s_global_ip_mask)) + tg_rt_int_handle = util_tg_routing_int_config(vars, tg1, tg2, tg_ph_1, tg_ph_2) + st.log("NAT Configuration") + nat_obj.config_nat_feature(vars.D1, 'enable') + util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_add) + nat_obj.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.out_ip_addr_l[0], + local_ip=data.in1_ip_addr_h[0], config=data.config_add, nat_type=data.nat_type_dnat) + nat_obj.config_nat_static(vars.D1, protocol=data.proto_tcp, global_ip=data.out_ip_addr_l[1], + local_ip=data.in1_ip_addr_h[1], + local_port_id=data.tcp_src_local_port, global_port_id=data.tcp_src_global_port, + config=data.config_add, nat_type=data.nat_type_dnat) + nat_obj.config_nat_static(vars.D1, protocol=data.proto_udp, global_ip=data.in1_ip_addr_h[2], + local_ip=data.out_ip_addr_l[2], + local_port_id=data.udp_src_global_port, global_port_id=data.udp_src_local_port, + config=data.config_add, nat_type=data.nat_type_snat) + nat_obj.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.s_global_ip, local_ip=data.s_local_ip, + config=data.config_add, nat_type=data.nat_type_snat) + nat_obj.config_nat_static(vars.D1,protocol=data.proto_all,global_ip=data.out_ip_addr_l[3],local_ip=data.in1_ip_addr_h[3], + config=data.config_add,nat_type=data.nat_type_dnat,twice_nat_id=data.twice_nat_id_1) + nat_obj.config_nat_static(vars.D1, protocol=data.proto_all, global_ip=data.global_ip_addr, + 
local_ip=data.test_ip_addr, + config=data.config_add, nat_type=data.nat_type_snat, twice_nat_id=data.twice_nat_id_1) + # dynamic NAT config + st.log("Creating NAT Pool-1") + nat_obj.config_nat_pool(vars.D1, pool_name=data.pool_name[0], global_ip_range=data.out_ip_range, + global_port_range= data.global_port_range, config=data.config_add) + nat_obj.config_nat_pool(vars.D1, pool_name="scale_pool", global_ip_range="125.56.90.23-125.56.90.30", + global_port_range="1001-8001", config=data.config_add) + st.log("Creating NAT Pool binding") + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_add) + st.log("Creating NAT Pool-2") + nat_obj.config_nat_pool(vars.D1, pool_name=data.pool_name[1], global_ip_range=data.out2_ip_range, + config=data.config_add) + st.log("Creating NAT Pool-2 binding") + nat_obj.config_nat_pool_binding(vars.D1, binding_name=data.bind_name[1], pool_name=data.pool_name[1], + config=data.config_add) + # nat acl for ingress traffic + acl_obj.create_acl_table(vars.D1, name=data.acl_table_in_nat_eg, stage="INGRESS", type=data.type, + description="ingress-acl", ports=[vars.D1T1P1]) + acl_obj.create_acl_rule(vars.D1, table_name=data.acl_table_in_nat_eg, rule_name="rule-32", packet_action=data.packet_forward_action, + SRC_IP="{}/{}".format(data.in1_ip_addr_rt, data.in1_ip_addr_mask), priority='98', type=data.type, ip_protocol="4") + acl_obj.create_acl_rule(vars.D1, table_name=data.acl_table_in_nat_eg, rule_name="rule-33", + packet_action=data.packet_do_not_nat_action, + SRC_IP="{}/{}".format('14.1.0.1', data.mask), priority='97', type=data.type, ip_protocol="4") + # Checking arp table for debugging + arp_obj.show_arp(vars.D1) + ip_obj.show_ip_route(vars.D1) + # Clearing all interface counters for debugging purpose + intf_obj.clear_interface_counters(vars.D1) + tg_str_data = util_tg_stream_config(tg1, tg2, tg_ph_1, tg_ph_2) + + +def nat_post_config(): + vars = st.get_testbed_vars() + + 
util_nat_zone_config(vars, [vars.D1T1P1, vars.D1T1P2], [data.zone_1, data.zone_2], config=data.config_del) + nat_obj.clear_nat_config(vars.D1) + nat_obj.config_nat_feature(vars.D1, 'disable') + ip_obj.delete_static_route(vars.D1, data.out_ip_addr_h, + "{}/{}".format(data.global_ip_addr_rt, data.global_ip_addr_mask)) + ip_obj.clear_ip_configuration(st.get_dut_names()) + vlan_obj.clear_vlan_configuration(st.get_dut_names()) + st.log("Cleaning up routing interfaces configured on TG") + tg1.tg_interface_config(port_handle=tg_ph_1, handle=tg_rt_int_handle[0]['handle'], mode='destroy') + tg1.tg_interface_config(port_handle=tg_ph_2, handle=tg_rt_int_handle[1]['handle'], mode='destroy') + tgapi.traffic_action_control(tg_handler, actions=['reset']) + + +def util_nat_zone_config(vars,intf,zone,config): + if config == data.config_add: + st.log("zone value configuration") + for i in range(len(intf)): + nat_obj.config_nat_interface(vars.D1, interface_name=intf[i], zone_value=zone[i], config=data.config_add) + else: + st.log("zone value un configuration") + for i in range(len(intf)): + nat_obj.config_nat_interface(vars.D1, interface_name=intf[i], zone_value=zone[i], config=data.config_del) + + return True + + +def util_tg_init(vars, tg_port_list): + tg_port_list = list(tg_port_list) if isinstance(tg_port_list, list) else [tg_port_list] + tg_handler = tgapi.get_handles(vars, tg_port_list) + tgapi.traffic_action_control(tg_handler, actions=['reset', 'clear_stats']) + return tg_handler + + +def util_tg_routing_int_config(vars,tg1,tg2,tg_ph_1,tg_ph_2): + + st.log("TG1 {} IPv4 address {} config".format(vars.T1D1P1,data.in1_ip_addr_h[0])) + tg1_rt_int_handle = tg1.tg_interface_config(port_handle=tg_ph_1, mode='config', intf_ip_addr=data.in1_ip_addr_h[0],enable_ping_response=1, + gateway=data.in1_ip_addr, netmask='255.255.0.0', arp_send_req='1',count='10', gateway_step='0.0.0.0') + st.log("TG2 {} IPv4 address {} config".format(vars.T1D1P2,data.out_ip_addr_h)) + tg2_rt_int_handle = 
tg2.tg_interface_config(port_handle=tg_ph_2, mode='config', intf_ip_addr=data.out_ip_addr_h, + gateway=data.out_ip_addr_l[0], netmask='255.255.255.0', arp_send_req='1',count='10', gateway_step='0.0.0.0') + return tg1_rt_int_handle,tg2_rt_int_handle + + +def util_tg_stream_config(tg1,tg2,tg_ph_1,tg_ph_2): + result = {1:{},2:{}} + + tg1_dyn_nat_udp_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg1_src_mac_addr, + mac_dst=dut1_rt_int_mac, + ip_src_addr=data.in1_ip_addr_h[-1], + ip_dst_addr=data.global_ip_addr, l4_protocol='udp', + udp_src_port=data.local_src_port[0], + udp_dst_port=data.local_dst_port[0]) + result[1]["tg1_dyn_nat_udp_data_str_id_1"] = tg1_dyn_nat_udp_data_str['stream_id'] + tg1_scale_nat_udp_data_str = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='continuous', + rate_pps="600", l3_protocol='ipv4',enable_stream_only_gen=0,enable_stream=0, + mac_src=data.tg1_src_mac_addr, mac_src_step='00:00:00:00:00:01', + mac_src_mode='increment', mac_src_count=1018, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.in1_ip_addr_h[2], + ip_src_mode='increment', ip_src_count='1018', + ip_src_step='0.0.0.1', ip_dst_addr=data.global_ip_addr, + l4_protocol='udp', udp_src_port=data.local_src_port[0], + udp_dst_port=data.local_dst_port[0],udp_dst_port_mode="incr", udp_dst_port_step='1', udp_dst_port_count ='1023') + result[1]["tg1_scale_nat_udp_data_str_id_1"] = tg1_scale_nat_udp_data_str['stream_id'] + #### TG2 config + tg2_dyn_nat_udp_1_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[0],l4_protocol='udp', udp_src_port=data.global_src_port[0], + 
udp_dst_port=data.global_dst_port[0]) + result[2]["tg2_dyn_nat_udp_1_data_str_id_1"] = tg2_dyn_nat_udp_1_data_str['stream_id'] + tg2_dyn_nat_udp_2_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[0], l4_protocol='udp',udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[1]) + result[2]["tg2_dyn_nat_udp_2_data_str_id_1"] = tg2_dyn_nat_udp_2_data_str['stream_id'] + tg2_dyn_nat_udp_3_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[0]) + result[2]["tg2_dyn_nat_udp_3_data_str_id_1"] = tg2_dyn_nat_udp_3_data_str['stream_id'] + tg2_dyn_nat_udp_4_data_str = tg2.tg_traffic_config(port_handle=tg_ph_2, mode='create', + transmit_mode='single_burst', + pkts_per_burst=data.pkt_count, rate_pps=data.rate_traffic, + l3_protocol='ipv4', mac_src=data.tg2_src_mac_addr, + mac_dst=dut1_rt_int_mac, ip_src_addr=data.global_ip_addr, + ip_dst_addr=data.out_ip_pool[1], l4_protocol='udp', udp_src_port=data.global_src_port[0], + udp_dst_port=data.global_dst_port[1]) + result[2]["tg2_dyn_nat_udp_4_data_str_id_1"] = tg2_dyn_nat_udp_4_data_str['stream_id'] + return result + + +def tg2_str_selector(trn_ip, trn_port): + ip1 = data.out_ip_pool[0] + ip2 = data.out_ip_pool[1] + p1 = data.global_dst_port[0] + p2 = data.global_dst_port[1] + s1 = tg_str_data[2]["tg2_dyn_nat_udp_1_data_str_id_1"] + s2 = tg_str_data[2]["tg2_dyn_nat_udp_2_data_str_id_1"] + s3 = 
tg_str_data[2]["tg2_dyn_nat_udp_3_data_str_id_1"] + s4 = tg_str_data[2]["tg2_dyn_nat_udp_4_data_str_id_1"] + tg2_stream_map = {s1: [ip1, p1], s2: [ip1, p2], s3: [ip2, p1], s4:[ip2, p2]} + for k, v in tg2_stream_map.items(): + if v ==[trn_ip, trn_port]: + return k + + +def util_check_nat_translations_count(dut, loopCnt, expcount): + flag = 0 + iter = 1 + while iter <= loopCnt: + ret_val = nat_obj.get_nat_translations_count(dut, counter_name="total_entries") + st.log("Number of NAT entries learned after iteration {} : {}".format(iter,ret_val)) + if int(ret_val) >= int(expcount): + flag = 1 + break + iter = iter+1 + if flag: + return True + else: + return False + +def nat_reboot_debug_fun(): + st.banner("Start of Collecting the needed debug info for failure analysis", width=100) + ip_obj.show_ip_route(vars.D1) + intf_obj.show_interface_counters_all(vars.D1) + arp_obj.show_arp(vars.D1) + nat_obj.show_nat_translations(vars.D1) + st.banner("End of Collecting the needed debug info for failure analysis", width=100) + diff --git a/spytest/tests/routing/NAT/test_nat_tcp.py b/spytest/tests/routing/NAT/test_nat_tcp.py new file mode 100644 index 00000000000..62874983cbc --- /dev/null +++ b/spytest/tests/routing/NAT/test_nat_tcp.py @@ -0,0 +1,138 @@ +import pytest + +from spytest import st +from spytest.dicts import SpyTestDict + +import utilities.common as utils + +import apis.routing.ip as ip_obj +import apis.routing.nat as nat_obj +import apis.system.basic as basic_obj + +dut = dict() + +data = SpyTestDict() +data.d1d2_ip_addr = "44.44.44.1" +data.d2d1_ip_addr = "44.44.44.2" +data.d2d3_ip_addr = "12.12.12.1" +data.d3d2_ip_addr = "12.12.12.2" +data.ip_addr_mask = "24" +data.d1_static_nw = "12.12.12.0" +data.d3_static_nw = "44.44.44.0" +data.zone_1 = "0" +data.zone_2 = "1" +data.proto_all = "all" +data.proto_tcp = "tcp" +data.proto_udp = "udp" +data.pool_name = ["pool_tr"] +data.bind_name = ["bind_tr"] +data.global_port_range = "2000-2002" +data.global_port = 
["2000","2001","2002"] +data.af_ipv4 = "ipv4" +data.shell_sonic = "sonic" +data.shell_vtysh = "vtysh" +data.config_add='add' +data.config_del='del' + +@pytest.fixture(scope="module", autouse=True) +def nat_module_config(request): + nat_pre_config() + yield + nat_post_config() + + +@pytest.fixture(scope="function") +def cmds_func_hooks(request): + # add things at the start every test case + # use 'st.get_func_name(request)' to compare + # if any thing specific a particular test case + yield + +def nat_pre_config(): + global vars + vars = st.ensure_min_topology("D1D2:1", "D2D3:1") + platform = basic_obj.get_hwsku(vars.D2) + common_constants = st.get_datastore(vars.D2, "constants", "default") + if platform.lower() in common_constants['TH3_PLATFORMS']: + st.error("NAT is not supported for this platform {}".format(platform)) + st.report_unsupported('NAT_unsupported_platform',platform) + ip_obj.config_ip_addr_interface(vars.D1, vars.D1D2P1, data.d1d2_ip_addr, data.ip_addr_mask, family=data.af_ipv4) + ip_obj.config_ip_addr_interface(vars.D2, vars.D2D1P1, data.d2d1_ip_addr, data.ip_addr_mask, family=data.af_ipv4) + ip_obj.config_ip_addr_interface(vars.D2, vars.D2D3P1, data.d2d3_ip_addr, data.ip_addr_mask, family=data.af_ipv4) + ip_obj.config_ip_addr_interface(vars.D3, vars.D3D2P1, data.d3d2_ip_addr, data.ip_addr_mask, family=data.af_ipv4) + ip_obj.create_static_route(vars.D1, data.d2d1_ip_addr,"{}/{}".format(data.d1_static_nw, data.ip_addr_mask), + shell=data.shell_vtysh, family=data.af_ipv4) + ip_obj.create_static_route(vars.D3, data.d2d3_ip_addr, "{}/{}".format(data.d3_static_nw, data.ip_addr_mask), + shell=data.shell_vtysh, family=data.af_ipv4) + st.log("NAT Configuration") + nat_obj.config_nat_feature(vars.D2, 'enable') + util_nat_zone_config(vars.D2, [vars.D2D1P1, vars.D2D3P1], [data.zone_1, data.zone_2], config=data.config_add) + st.log("Creating NAT Pool") + nat_obj.config_nat_pool(vars.D2, pool_name=data.pool_name[0], global_ip_range=data.d2d1_ip_addr, + 
global_port_range=data.global_port_range, config=data.config_add) + st.log("Creating NAT Pool binding") + nat_obj.config_nat_pool_binding(vars.D2, binding_name=data.bind_name[0], pool_name=data.pool_name[0], + config=data.config_add) + utils.exec_all(True, [[ip_obj.show_ip_route, vars.D1], [ip_obj.show_ip_route, vars.D2]]) + ip_obj.show_ip_route(vars.D3) + + +@pytest.mark.nat_regression1 +def test_ft_dynamic_napt_traceroute(): + # ################ Author Details ################ + # Name: Kiran Vedula + # Email: kiran-kumar.vedulaa@broadcom.com + # ################################################ + # Objective - Verify traceroute over a NAT translation + # ################################################# + nat_obj.clear_nat(vars.D2, translations=True) + nat_obj.clear_nat(vars.D2, statistics=True) + + ip_obj.ping(vars.D3, data.d1d2_ip_addr, family='ipv4', count=3) + ip_obj.traceroute(vars.D3, data.d1d2_ip_addr) + + trn_val = nat_obj.get_nat_translations(vars.D2, protocol=data.proto_udp, dst_ip=data.d2d1_ip_addr, + dst_ip_port=data.global_port[0]) + if not trn_val: + ip_obj.ping(vars.D1, data.d2d1_ip_addr, family='ipv4', count=1) + ip_obj.ping(vars.D1, data.d3d2_ip_addr, family='ipv4', count=1) + st.error("Received empty list0,nat translation table not updated") + st.report_fail("traceroute_over_nat_failed") + + trn_val = nat_obj.get_nat_translations(vars.D2, protocol=data.proto_udp, dst_ip=data.d2d1_ip_addr, + dst_ip_port=data.global_port[1]) + if not trn_val: + ip_obj.ping(vars.D2, data.d3d2_ip_addr, family='ipv4', count=1) + st.error("Received empty list1,nat translation table not updated") + st.report_fail("traceroute_over_nat_failed") + + trn_val = nat_obj.get_nat_translations(vars.D2, protocol=data.proto_udp, dst_ip=data.d2d1_ip_addr, + dst_ip_port=data.global_port[2]) + if not trn_val: + st.error("Received empty list2,nat translation table not updated") + st.report_fail("traceroute_over_nat_failed") + 
st.report_pass("traceroute_over_nat_translation_successful") + +def nat_post_config(): + vars = st.get_testbed_vars() + util_nat_zone_config(vars.D2, [vars.D2D1P1, vars.D2D3P1], [data.zone_1, data.zone_2], config=data.config_del) + nat_obj.clear_nat_config(vars.D2) + nat_obj.config_nat_feature(vars.D2, 'disable') + ip_obj.delete_static_route(vars.D1, data.d2d1_ip_addr,"{}/{}".format(data.d1_static_nw, data.ip_addr_mask)) + ip_obj.delete_static_route(vars.D3, data.d2d3_ip_addr,"{}/{}".format(data.d3_static_nw, data.ip_addr_mask)) + ip_obj.clear_ip_configuration(st.get_dut_names()) + + + +def util_nat_zone_config(dut,intf,zone,config): + if config == data.config_add: + st.log("zone value configuration") + for i in range(len(intf)): + nat_obj.config_nat_interface(dut, interface_name=intf[i], zone_value=zone[i], config=data.config_add) + else: + st.log("zone value un configuration") + for i in range(len(intf)): + nat_obj.config_nat_interface(dut, interface_name=intf[i], zone_value=zone[i], config=data.config_del) + + return True + diff --git a/spytest/tests/routing/VRF/test_vrf.py b/spytest/tests/routing/VRF/test_vrf.py new file mode 100644 index 00000000000..a31195a2ec9 --- /dev/null +++ b/spytest/tests/routing/VRF/test_vrf.py @@ -0,0 +1,981 @@ +################################################################################## +#Script Title : VRF Lite +#Author : Manisha Joshi +#Mail-id : manisha.joshi@broadcom.com +################################################################################# + +import pytest +from spytest import st,utils + +from vrf_vars import * #all the variables used for vrf testcase +from vrf_vars import data +import vrf_lib as loc_lib +from utilities import parallel +from apis.system import basic + +import apis.switching.portchannel as pc_api + +import apis.routing.ip as ip_api +import apis.routing.vrf as vrf_api +import apis.routing.bgp as bgp_api +import apis.routing.ip_bgp as ip_bgp +import apis.routing.arp as arp_api + +import 
apis.system.reboot as reboot_api + +from spytest.tgen.tg import tgen_obj_dict +from spytest.tgen.tgen_utils import validate_tgen_traffic + + +#Topology: +#------#TG#----(2links)----#DUT1#----(4links)----#DUT2#----(2links)-----#TG#-------# + +def initialize_topology(): +# code for ensuring min topology + st.log("Initialize.............................................................................................................") + vars = st.ensure_min_topology("D1D2:4", "D1T1:2", "D2T1:2") + data.dut_list = st.get_dut_names() + data.dut1 = data.dut_list[0] + data.dut2 = data.dut_list[1] + utils.exec_all(True,[[bgp_api.enable_docker_routing_config_mode,data.dut1], [bgp_api.enable_docker_routing_config_mode,data.dut2]]) + data.d1_dut_ports = [vars.D1D2P1,vars.D1D2P2, vars.D1D2P3, vars.D1D2P4] + data.d2_dut_ports = [vars.D2D1P1, vars.D2D1P2,vars.D2D1P3, vars.D2D1P4] + data.dut1_tg1_ports = [vars.D1T1P1] + data.dut2_tg1_ports = [vars.D2T1P1] + data.tg_dut1_hw_port = vars.T1D1P1 + data.tg_dut2_hw_port = vars.T1D2P1 + data.tg1 = tgen_obj_dict[vars['tgen_list'][0]] + data.tg2 = tgen_obj_dict[vars['tgen_list'][0]] + data.tg_dut1_p1 = data.tg1.get_port_handle(vars.T1D1P1) + data.tg_dut2_p1 = data.tg2.get_port_handle(vars.T1D2P1) + data.d1_p1_intf_v4 = {} + data.d1_p1_intf_v6 = {} + data.d2_p1_intf_v4 = {} + data.d2_p1_intf_v6 = {} + data.d1_p1_bgp_v4 = {} + data.d1_p1_bgp_v6 = {} + data.d2_p1_bgp_v4 = {} + data.d2_p1_bgp_v6 = {} + data.stream_list = {} + +@pytest.fixture(scope='module', autouse = True) +def prologue_epilogue(): + initialize_topology() + loc_lib.vrf_base_config() + # import pdb; pdb.set_trace() + # import code; code.interact(local=globals()) + yield + #loc_lib.vrf_base_unconfig() + +@pytest.mark.sanity +def test_VrfFun001_06(): + st.log('#######################################################################################################################') + st.log('######------ Combining FtRtVrfFun001 and FtRtVrfFun006-----######') + st.log('######------ 
FtRtVrfFun001: Verify address family IPv4 and IPv6 in VRF instance-----######') + st.log('######------FtRtVrfFun006: Configure multiple interfaces to a VRF and configure same interface to multiple VRFs-----######') + st.log('#######################################################################################################################') + + ############################################################################################################################### + + result = 0 + output = vrf_api.get_vrf_verbose(dut = data.dut1,vrfname = vrf_name[0]) + if vrf_name[0] in output['vrfname']: + st.log('VRF configured on DUT1 is as expected',vrf_name[0]) + else: + st.log('VRF name configured on DUT1 is as not expected',vrf_name[0]) + result += 1 + for value in output['interfaces']: + if data.d1_dut_ports[0] or dut1_loopback[0] or data.dut1_loopback[1] or value == 'Vlan1': + st.log('Bind to VRF is as expected',value) + else: + st.log('Bind to VRF is not as expected',value) + result += 1 + output = vrf_api.get_vrf_verbose(dut = data.dut2,vrfname = vrf_name[0]) + if vrf_name[0] in output['vrfname']: + st.log('VRF configured on DUT1 is as expected',vrf_name[0]) + else: + st.log('VRF name configured on DUT1 is as not expected',vrf_name[0]) + result += 1 + for value in output['interfaces']: + if data.d2_dut_ports[0] or dut2_loopback[0] or value == 'Vlan6': + st.log('Bind to VRF is as expected',value) + else: + st.log('Bind to VRF is not as expected',value) + result += 1 + if not ip_api.verify_interface_ip_address(data.dut1, 'PortChannel10' ,dut1_dut2_vrf_ip[0]+'/24', vrfname = vrf_name[2]): + st.log('IPv4 address configuration on portchannel interface failed') + result += 1 + if not ip_api.verify_interface_ip_address(data.dut2, 'PortChannel10' ,dut2_dut1_vrf_ipv6[0]+'/64', vrfname = vrf_name[2],family='ipv6'): + st.log('IPv6 address configuration on portchannel interface failed') + result += 1 + if arp_api.get_arp_count(data.dut1, vrf = vrf_name[1]) < 2: + 
st.log('ARP entry for VRF-102 not as expected on DUT1') + result += 1 + if arp_api.get_arp_count(data.dut2, vrf = vrf_name[1]) < 2: + st.log('ARP entry for VRF-102 not as expected on DUT2') + result += 1 + if arp_api.get_ndp_count(data.dut1, vrf = vrf_name[1]) < 2: + st.log('NDP entry for VRF-102 not as expected on DUT1') + result += 1 + if arp_api.get_ndp_count(data.dut2, vrf = vrf_name[1]) < 2: + st.log('NDP entry for VRF-102 not as expected on DUT2') + result += 1 + if not loc_lib.verify_bgp(phy = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-101 did not come up') + result += 1 + if not loc_lib.verify_bgp(ve = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.verify_bgp(ve = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.verify_bgp(pc = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-103 did not come up') + result += 1 + if not loc_lib.verify_bgp(pc = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-103 did not come up') + result += 1 + if result == 0 : + st.report_pass('test_case_passed') + else: + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +#@pytest.mark.depends('test_VrfFun001_06') +def lib_test_VrfFun002(): + result = 0 + loc_lib.clear_tg() + st.log('Verify ping and traceroute on physical interface on non default vrf for both IPv4 and IPv6') + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ip[0], interface= vrf_name[0], count = 2): + st.log('IPv4 Ping from Vrf-101-DUT1 to Vrf-101-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ip[0], vrf_name= vrf_name[0], timeout = 3): + st.log('IPv4 Traceroute from Vrf-101-DUT1 to Vrf-101-DUT2 failed') + result += 1 + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', interface= vrf_name[0], count = 2): + st.log('IPv6 Ping from Vrf-101-DUT1 to Vrf-101-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, 
dut2_dut1_vrf_ipv6[0], family='ipv6', vrf_name= vrf_name[0], timeout = 3): + st.log('IPv6 Traceroute from Vrf-101-DUT1 to Vrf-101-DUT2 failed') + result += 1 + if not ip_api.verify_ip_route(data.dut1, vrf_name = vrf_name[0], type='B', nexthop = tg1_dut1_vrf_ip[0], interface = 'Vlan'+dut1_tg1_vlan[0]): + st.log('IPv4 routes on VRF-101, not learnt on DUT1') + result += 1 + if not ip_api.verify_ip_route(data.dut2, vrf_name = vrf_name[0], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = data.d2_dut_ports[0]): + st.log('IPv4 routes on VRF-101, not learnt on DUT2') + result += 1 + loc_lib.clear_tg() + if not ip_api.verify_ip_route(data.dut1, vrf_name = vrf_name[0], type='B', nexthop = tg1_dut1_vrf_ipv6[0], interface = 'Vlan'+dut1_tg1_vlan[0], family = 'ipv6'): + st.log('IPv6 routes on VRF-101, not learnt on DUT1') + result += 1 + if not ip_api.verify_ip_route(data.dut2, vrf_name = vrf_name[0], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = data.d2_dut_ports[0], family = 'ipv6'): + st.log('IPv6 routes on VRF-101, not learnt on DUT2') + result += 1 + return result + +@pytest.mark.sanity +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun002(): + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun002: Bind/unbind/rebind VRF to a physical interface -----######') + st.log('#######################################################################################################################') + result = 0 + st.log('######------Unbind and rebind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + loc_lib.dut_vrf_bind(phy = '1', config = 'no') + loc_lib.dut_vrf_bind(phy = '1') + result = lib_test_VrfFun002() + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('BGP is not converging after unbind/bind on physical interface') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +def 
lib_test_VrfFun003(): + + ############################################################################################################################### + result = 0 + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('ve_v4_stream'), duration = '2') + st.log('Verify ping and traceroute on virtual interface non-default vrf for both IPv4 and IPv6') + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ip[0], interface= vrf_name[1], count = 2): + st.log('IPv4 Ping from Vrf-102-DUT1 to vrf DUT2-102 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ip[0], vrf_name= vrf_name[1], timeout = 3): + st.log('IPv4 Traceroute Vrf-102-DUT1 to vrf DUT2-102 failed') + result += 1 + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', interface= vrf_name[1], count = 2): + st.log('IPv6 Ping Vrf-102-DUT1 to vrf DUT2-102 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', vrf_name= vrf_name[1], timeout = 3): + st.log('IPv6 Traceroute Vrf-102-DUT1 to vrf DUT2-102 failed') + result += 1 + if not ip_api.verify_ip_route(data.dut1, vrf_name = vrf_name[1], type='B', nexthop = tg1_dut1_vrf_ip[1], interface = 'Vlan'+dut1_tg1_vlan[1]): + st.log('IPv4 routes on VRF-102, not learnt on DUT1') + result += 1 + if not ip_api.verify_ip_route(data.dut2, vrf_name = vrf_name[1], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = 'Vlan'+dut2_dut1_vlan[0]): + st.log('IPv4 routes on VRF-102, not learnt on DUT2') + result += 1 + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('ve_v4_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('ve_v4_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: 
+ st.log('IPv4 Traffic on VRF-102 bound to virtual interfaces failed') + result += 1 + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('ve_v6_stream'), duration = '2') + if not ip_api.verify_ip_route(data.dut1, vrf_name = vrf_name[1], type='B', nexthop = tg1_dut1_vrf_ipv6[1], interface = 'Vlan'+dut1_tg1_vlan[1],family='ipv6'): + st.log('IPv6 routes on VRF-102, not learnt on DUT1') + result += 1 + if not ip_api.verify_ip_route(data.dut2, vrf_name = vrf_name[1], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'Vlan'+dut2_dut1_vlan[0],family='ipv6'): + st.log('IPv6 routes on VRF-102, not learnt on DUT2') + result += 1 + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('ve_v6_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('ve_v6_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + st.log('IPv4 Traffic on VRF-102 bound to virtual interfaces failed') + result += 1 + return result + +@pytest.mark.sanity +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun003(): + + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun003: Bind/unbind/rebind VRF to a virtual interface -----######') + st.log('#######################################################################################################################') + + result = 0 + st.log('######------Unbind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + loc_lib.dut_vrf_bind(ve = '1', config = 'no') + st.log('######------Rebind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + 
loc_lib.dut_vrf_bind(ve = '1') + result = lib_test_VrfFun003() + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('BGP is not converging after unbind/bind on virtual interface') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +def lib_test_VrfFun004(): + result = 0 + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.values(), duration = '2') + st.log('Verify ping and traceroute on portchannel non-default vrf for both IPv4 and IPv6') + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ip[0], interface= vrf_name[2], count = 2): + st.log('IPv4 Ping from Vrf-103-DUT1 to Vrf-103-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ip[0], vrf_name= vrf_name[2], timeout = 3): + st.log('IPv4 Traceroute from Vrf-103-DUT1 to Vrf-103-DUT2 failed') + result += 1 + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', interface= vrf_name[2], count = 2): + st.log('IPv6 Ping from Vrf-103-DUT1 to Vrf-103-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', vrf_name= vrf_name[2], timeout = 3): + st.log('IPv6 Traceroute from Vrf-103-DUT1 to Vrf-103-DUT2 failed') + result += 1 + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut1, vrf_name = vrf_name[2], type='B', nexthop = tg1_dut1_vrf_ip[2], interface = 'Vlan'+dut1_tg1_vlan[2], retry_count= 2, delay= 5): + st.log('IPv4 routes on VRF-103, not learnt on DUT1') + result += 1 + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, vrf_name = vrf_name[2], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = 'PortChannel10', retry_count= 2, delay= 5): + st.log('IPv4 routes on VRF-103, not learnt on DUT2') + result += 1 + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('pc_v4_stream')]]}} + 
data.tg2.tg_traffic_control(action = 'stop', stream_handle = [data.stream_list.values()]) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + st.log('IPv4 Traffic on VRF-103 bound to port channel failed') + result += 1 + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.values(), duration = '2') + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut1, family='ipv6', vrf_name = vrf_name[2], type='B', nexthop = tg1_dut1_vrf_ipv6[2], interface = 'Vlan'+dut1_tg1_vlan[2], retry_count= 2, delay= 5): + st.log('IPv6 routes on VRF-103, not learnt on DUT1') + result += 1 + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, family='ipv6', vrf_name = vrf_name[2], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'PortChannel10',retry_count= 2, delay= 5): + st.log('IPv6 routes on VRF-103, not learnt on DUT2') + result += 1 + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('pc_v6_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('pc_v6_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + st.log('IPv6 Traffic on VRF-103 bound to port channel failed') + result += 1 + return result + +@pytest.mark.sanity +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun004(): + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun004: Bind/unbind/rebind VRF to a port channel interface -----######') + st.log('#######################################################################################################################') + 
+ result = 0 + st.log('######------Unbind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + loc_lib.dut_vrf_bind(pc = '1', config = 'no') + st.log('Verify ping and traceroute on portchannel global vrf for both IPv4 and IPv6') + utils.exec_all(True, [[pc_api.create_portchannel, data.dut1, 'PortChannel10'], [pc_api.create_portchannel, data.dut2, 'PortChannel10']]) + utils.exec_all(True, [[pc_api.add_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[2]], [pc_api.add_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[2]]]) + utils.exec_all(True, [[pc_api.add_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[3]], [pc_api.add_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[3]]]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ip[0], count = 2): + st.log('IPv4 Ping from Portchannel10-DUT1 to Portchannel10-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ip[0],timeout = 3): + st.log('IPv4 Traceroute from Portchannel10-DUT1 to Portchannel10-DUT2 failed') + result += 1 + if not ip_api.ping(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', count = 2): + st.log('IPv6 Ping from Portchannel10-DUT1 to Portchannel10-DUT2 failed') + result += 1 + if not ip_api.traceroute(data.dut1, dut2_dut1_vrf_ipv6[0], family='ipv6', timeout = 3): + st.log('IPv6 Traceroute from Portchannel10-DUT1 to Portchannel10-DUT2 failed') + result += 1 + st.log('######------Delete 
the member port and port-channel------######') + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True, [[pc_api.add_del_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[2],'del'], [pc_api.add_del_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[2],'del']]) + utils.exec_all(True, [[pc_api.add_del_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[3],'del'], [pc_api.add_del_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[3],'del']]) + utils.exec_all(True, [[pc_api.delete_portchannel, data.dut1, 'PortChannel10'], [pc_api.delete_portchannel, data.dut2, 'PortChannel10']]) + st.log('######------Rebind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + loc_lib.dut_vrf_bind(pc = '1') + result = lib_test_VrfFun004() + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('BGP is not converging after unbind/bind on portchannel') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_07_08(request,prologue_epilogue): + yield + st.log('######------Delete the physical interface from port channel------######') + pc_api.add_del_portchannel_member(data.dut1, 'PortChannel10', data.d1_dut_ports[0], flag='del') + pc_api.add_del_portchannel_member(data.dut2, 'PortChannel10', data.d2_dut_ports[0], flag='del') + st.log('######------Bind DUT1 <--> DUT2 one physical interface to vrf-101 and config v4 and v6 addresses------######') + dict1 = 
{'vrf_name':vrf_name[0], 'intf_name':data.d1_dut_ports[0],'skip_error':True} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':data.d2_dut_ports[0],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[0],dut1_loopback_ip[0],dut1_loopback_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[0],dut2_loopback_ip[0],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[0],dut1_loopback_ipv6[0],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[0],dut2_loopback_ipv6[0],dut2_loopback_ipv6_subnet,'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ip[0],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ip[0], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ipv6[0],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_07_08(vrf_fixture_tc_07_08): + + 
st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun007: Dynamically change port membership from one non-default VRF to another-----######') + st.log('######------FtRtVrfFun008: Dynamically modify port channel interfaces in a vrf -----######') + st.log('#######################################################################################################################') + result = 0 + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ip[0],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ip[0], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ipv6[0],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[0],dut1_loopback_ip[0],dut1_loopback_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[0],dut2_loopback_ip[0],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[0],dut1_loopback_ipv6[0],dut1_loopback_ipv6_subnet,'ipv6'], 
[ip_api.delete_ip_interface,data.dut2,dut2_loopback[0],dut2_loopback_ipv6[0],dut2_loopback_ipv6_subnet,'ipv6']]) + st.log('######------Unbind DUT2 <--> DUT1 one physical interface from vrf-101------######') + vrf_api.bind_vrf_interface(dut = data.dut1, vrf_name = vrf_name[0], intf_name = data.d1_dut_ports[0], skip_error = True, config = 'no') + vrf_api.bind_vrf_interface(dut = data.dut2, vrf_name = vrf_name[0], intf_name = data.d2_dut_ports[0], skip_error = True, config = 'no') + st.log('######------Add the physical ports to the port channel------######') + pc_api.add_del_portchannel_member(data.dut1, 'PortChannel10', data.d1_dut_ports[0], flag = 'add') + pc_api.add_del_portchannel_member(data.dut2, 'PortChannel10', data.d2_dut_ports[0], flag = 'add') + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('pc_v6_stream'), duration = '2') + if not pc_api.verify_portchannel(data.dut1, 'PortChannel10'): + st.log('Port channel not up after adding memebers from another vrf') + result += 1 + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('pc_v6_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('pc_v6_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + st.log('IPv6 Traffic on Port channel failed') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Changing port membership from one vrf to another vrf failed') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_34_46(): + + st.log('#######################################################################################################################') 
+ st.log('######------Combining FtRtVrfFun005, FtRtVrfFun034 and FtRtVrfFun046 -----######') + st.log('#######################################################################################################################') + result = 0 + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[0], family = 'ipv4') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[0], family = 'ipv6') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[1], family = 'ipv4') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[1], family = 'ipv6') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[2], family = 'ipv4') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_name[2], family = 'ipv6') + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, vrf_name = vrf_name[1], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = 'Vlan'+dut2_dut1_vlan[0], retry_count= 2, delay= 5): + st.log('IPv4 routes on VRF-102, not learnt on DUT2') + result += 1 + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, vrf_name = vrf_name[1], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'Vlan'+dut2_dut1_vlan[0],family='ipv6',retry_count= 2, delay= 5): + st.log('IPv6 routes on VRF-102, not learnt on DUT2') + result += 1 + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, family='ipv6', vrf_name = vrf_name[2], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'PortChannel10',retry_count= 2, delay= 5): + st.log('IPv6 routes on VRF-103, not learnt on DUT2') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +def vrf_tc_38_39_48(): + st.log('IPv6 BGP session did not come up, after delete/add IPv6 IBGP and EBGP config.') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config_type_list =['neighbor']) + st.log('######------Readd EBGP IPv6 
neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config_type_list =['activate','nexthop_self']) + st.log('######------Readd EBGP IPv6 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config_type_list =['activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], local_as = dut1_as[2], config = 'yes', addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=tg1_dut1_vrf_ipv6[2]) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], local_as = dut1_as[2], config = 'yes', addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut2_dut1_vrf_ipv6[0]) + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_38_39_48(): + + st.log('#######################################################################################################################') + st.log('######------Combined FtRtVrfFun038, FtRtVrfFun039 and FtRtVrfFun048-----######') + st.log('######------FtRtVrfFun038 Verify IBGP neighbor for BGPv6 in vrf for ipv6 -----######') + st.log('######------FtRtVrfFun039 Verify EBGP neighbor for BGPv6 in vrf for ipv6 -----######') + st.log('######------FtRtVrfFun048 Verify BGP4+ route-map functionality in non-default VRF -----######') + 
st.log('#######################################################################################################################') + result = 0 + st.log('######------Remove IBGP IPv6 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', local_as = dut1_as[2], neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config = 'no', config_type_list =['neighbor']) + st.log('######------Readd IBGP IPv6 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config_type_list =['activate','nexthop_self']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], local_as = dut1_as[2], config = 'yes', addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut2_dut1_vrf_ipv6[0]) + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, family='ipv6', vrf_name = vrf_name[2], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'PortChannel10',retry_count= 2, delay= 10): + st.log('IPv6 routes on VRF-102, not learnt on DUT2') + result += 1 + basic.get_techsupport(filename='test_VrfFun_38_39_48_ipv6_routes') + st.log('######------Remove EBGP IPv6 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config = 'no', config_type_list =['neighbor']) + st.log('######------Readd EBGP IPv6 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = 
vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], addr_family ='ipv6', config = 'yes', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config_type_list =['activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], local_as = dut1_as[2], config = 'yes', addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=tg1_dut1_vrf_ipv6[2]) + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, family='ipv6', vrf_name = vrf_name[2], type='B', nexthop = dut1_dut2_vrf_ipv6[0], interface = 'PortChannel10',retry_count= 3, delay= 10): + st.log('IPv6 routes on VRF-102, not learnt on DUT2') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + vrf_tc_38_39_48() + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_31_43(request,prologue_epilogue): + yield + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'no', config_type_list =['redist'], redistribute ='connected') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2],local_as = dut1_as[2], addr_family ='ipv6', neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config = 'no', config_type_list =['redist'], redistribute ='connected') + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_31_43(vrf_fixture_tc_31_43): + st.log('#######################################################################################################################') + st.log('######------Combined FtRtVrfFun031 and FtRtVrfFun043 -----######') + st.log('######------FtRtVrfFun031 Redistribute connected IPv4 routes into IBGP in non-default vrf 
-----######') + st.log('######------FtRtVrfFun043 Redistribute connected IPv6 routes into IBGP in non-default vrf -----######') + st.log('#######################################################################################################################') + + result = 0 + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'yes', config_type_list =['redist'], redistribute ='connected') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2],local_as = dut1_as[2], addr_family ='ipv6', neighbor = dut2_dut1_vrf_ipv6[0], remote_as = dut2_as[2], config = 'yes', config_type_list =['redist'], redistribute ='connected') + st.wait(5) + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('pc_v6_stream'), duration = '2') + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('pc_v6_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('pc_v6_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + st.log('IPv6 Traffic on VRF-103 failed') + result += 1 + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('phy_v4_stream'), duration = '2') + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('phy_v4_stream')]]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('phy_v4_stream')) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + if not aggrResult: + 
st.log('IPv4 Traffic on VRF-101 failed') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Redistribute connected IPv4 and IPv6 routes into IBGP in VRF-103 failed') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +def vrf_tc_26_27(): + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'yes',config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'yes',config_type_list =['activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'yes', config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'yes', config_type_list =['activate','nexthop_self']) + +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_26_27(): + st.log('#######################################################################################################################') + st.log('######------Combined FtRtVrfFun026 and FtRtVrfFun027 -----######') + st.log('######------ FtRtVrfFun026: Verify IBGP neighbor for BGPv4 in vrf -----######') + st.log('######------FtRtVrfFun027 Verify EBGP neighbor for BGPv4 in vrf for ipv4 -----######') + st.log('#######################################################################################################################') + + result = 0 + st.log('######------Remove EBGP IPv4 neighbor configuration from all the VRFs ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0], local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'no', config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = 
vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'yes', config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = dut2_dut1_vrf_ip[0], remote_as = dut2_as[0], config = 'yes', config_type_list =['activate','nexthop_self']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0], local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'no', config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'yes',config_type_list =['neighbor']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0],local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'yes',config_type_list =['activate']) + st.wait(5) + if not ip_api.verify_ip_route(data.dut1, vrf_name = vrf_name[0], type='B', nexthop = tg1_dut1_vrf_ip[0], interface = 'Vlan'+dut1_tg1_vlan[0]): + st.log('IPv4 routes on VRF-101, not learnt on DUT1') + result += 1 + if not ip_api.verify_ip_route(data.dut2, vrf_name = vrf_name[0], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = data.d2_dut_ports[0]): + st.log('IPv4 routes on VRF-101, not learnt on DUT2') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('IPv4 BGP session did not come up, after delete/add IPv4 IBGP and EBGP config') + vrf_tc_26_27() + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_28_36_43_47(request,prologue_epilogue): + #1234 + yield + # import pdb; pdb.set_trace() + # import code; code.interact(local=globals()) + #for nbr_1,nbr_2 in zip(dut2_dut1_vrf_ip[0:2],dut1_dut2_vrf_ip[0:2]): + # dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'peergroup':'peergroup_v4','config_type_list':['peergroup']} + # dict2 = 
{'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'peergroup':'peergroup_v4','config_type_list':['peergroup']} + # parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + # for nbr_1,nbr_2 in zip(dut2_dut1_vrf_ipv6[0:2],dut1_dut2_vrf_ipv6[0:2]): + # dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'peergroup':'peergroup_v6','config_type_list':['peergroup'],'addr_family':'ipv6'} + # dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'peergroup':'peergroup_v6','config_type_list':['peergroup'],'addr_family':'ipv6'} + # parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + bgp_api.activate_bgp_neighbor(data.dut1, dut1_as[1], dut2_dut1_vrf_ip[1], family="ipv4", config='no',vrf=vrf_name[1], remote_asn=dut2_as[1]) + bgp_api.activate_bgp_neighbor(data.dut1, dut1_as[1], dut2_dut1_vrf_ipv6[1], family="ipv6", config='no',vrf=vrf_name[1], remote_asn=dut2_as[1]) + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + bgp_api.activate_bgp_neighbor(data.dut2, dut2_as[1], dut1_dut2_vrf_ip[1], family="ipv4", config='no',vrf=vrf_name[1], remote_asn=dut1_as[1]) + bgp_api.activate_bgp_neighbor(data.dut2, dut2_as[1], dut1_dut2_vrf_ipv6[1], family="ipv6", config='no',vrf=vrf_name[1], remote_asn=dut1_as[1]) + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'remote_as':dut2_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + dict2 = 
{'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'remote_as':dut1_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_28_36_43_47(vrf_fixture_tc_28_36_43_47): + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun026 Verify the EBGP peer connection and route advertisement under IPV4 address-family in non-default vrf -----######') + st.log('######------FtRtVrfFun036 Verify BGP peer-group for IPv4 address family on non-default VRF -----######') + st.log('######------FtRtVrfFun043 Verify the EBGP peer connection and route advertisement under IPV6 address-family in non-default vrf -----######') + st.log('######------FtRtVrfFun047 Verify BGP peer-group for IPv6 address family with EBGP neighbors non-default VRF-----######') + st.log('#######################################################################################################################') + result = 0 + st.log('Configuring Vlan102 as another neighbor in VRF-102') + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['activate','nexthop_self']} + 
parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + st.log('Configuring Vlan102 as another neighbor in VRF-102') + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'remote_as':dut2_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'remote_as':dut1_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'remote_as':dut2_as[1],'addr_family':'ipv6','config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'remote_as':dut1_as[1],'addr_family':'ipv6','config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + st.log('Configuring IPv4 peer group for Vlan101 and Vlan 102') + for nbr_1,nbr_2 in zip(dut2_dut1_vrf_ip[0:2],dut1_dut2_vrf_ip[0:2]): + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'peergroup':'peergroup_v4','config_type_list':['peergroup'],'remote_as':dut2_as[1],'neighbor':nbr_1} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'peergroup':'peergroup_v4','config_type_list':['peergroup'],'remote_as':dut1_as[1],'neighbor':nbr_2} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + st.log('Configuring IPv6 peer group for Vlan101 and Vlan 102') + for nbr_1,nbr_2 in zip(dut2_dut1_vrf_ipv6[0:2],dut1_dut2_vrf_ipv6[0:2]): + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'peergroup':'peergroup_v6','config_type_list':['peergroup'],'remote_as':dut2_as[1],'neighbor':nbr_1,'addr_family':'ipv6'} + dict2 = 
{'vrf_name':vrf_name[1],'local_as':dut2_as[1],'peergroup':'peergroup_v6','config_type_list':['peergroup'],'remote_as':dut1_as[1],'neighbor':nbr_2,'addr_family':'ipv6'} +        parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) +    if not ip_bgp.verify_bgp_neighbor(data.dut1, neighborip = dut2_dut1_vrf_ip[1], state='Established', vrf = vrf_name[1]): +        st.log('IPv4 IBGP neighbor did not come up on VRF-102 after peer-group configuration') +        result += 1 +    loc_lib.clear_tg() +    data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.get('ve_v4_stream'), duration = '2') +    traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [[data.stream_list.get('ve_v4_stream')]]}} +    data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.get('ve_v4_stream')) +    aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') +    if not aggrResult: +        st.log('IPv4 Traffic on VRF-102 failed after peer-group configuration') +        result += 1 +    if not ip_bgp.verify_bgp_neighbor(data.dut1, neighborip = dut2_dut1_vrf_ipv6[1], state='Established', vrf = vrf_name[1]): +        st.log('IPv6 IBGP neighbor did not come up on VRF-102 after peer-group configuration') +        result += 1 +    if result == 0: +        st.report_pass('test_case_passed') +    else: +        st.log('Peer group verification for IPv4 and IPv6 in VRF-102 failed') +        loc_lib.debug_bgp_vrf() +        st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_35_49(request,prologue_epilogue): +    yield +    st.log('UnConfigure max path iBGP for IPv4 is 2 in DUT1 and 8 in DUT2') +    dict1 = {'vrf_name':vrf_name[1],'local_as': dut1_as[1], 'max_path_ibgp': '', 'config_type_list': ["max_path_ibgp"], 'config' : 'no'} +    dict2 = {'vrf_name':vrf_name[1],'local_as': dut2_as[1], 'max_path_ibgp': '', 'config_type_list': 
["max_path_ibgp"], 'config' : 'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + ############################################################################################################################### + st.log('UnConfigure max path iBGP for IPv6 is 2 in DUT1 and 8 in DUT2') + dict1 = {'vrf_name':vrf_name[1],'local_as': dut1_as[1], 'max_path_ibgp': '', 'config_type_list': ["max_path_ibgp"],'addr_family' : 'ipv6', 'config' : 'no'} + dict2 = {'vrf_name':vrf_name[1],'local_as': dut2_as[1], 'max_path_ibgp': '', 'config_type_list': ["max_path_ibgp"],'addr_family' : 'ipv6', 'config' : 'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[1], local_as = dut1_as[1], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[0]) + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'addr_family':'ipv6','remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'addr_family':'ipv6','remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_35_49(vrf_fixture_tc_35_49): + 
st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun035 IPv4 ECMP in non-default vrf along with route leak into another vrf -----######') + st.log('######------FtRtVrfFun049 IPv6 ECMP in non-default vrf along with route leak into another vrf -----######') + st.log('#######################################################################################################################') + result = 0 + st.log('Configure max path iBGP for IPv4 is 2 in DUT1 and 8 in DUT2') + dict1 = {'vrf_name':vrf_name[1],'local_as': dut1_as[1], 'max_path_ibgp': 2, 'config_type_list': ["max_path_ibgp"]} + dict2 = {'vrf_name':vrf_name[1],'local_as': dut2_as[1], 'max_path_ibgp': 8, 'config_type_list': ["max_path_ibgp"]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + st.log('Configuring Vlan102 as another neighbor in VRF-102') + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[1],'remote_as':dut2_as[1],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[1],'remote_as':dut1_as[1],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, vrf_name = vrf_name[1], type='B', nexthop = dut1_dut2_vrf_ip[0], interface = 'Vlan'+dut2_dut1_vlan[0]): + st.log('IPv4 routes on VRF-102, not learnt on DUT2') + result += 1 + 
basic.get_techsupport(filename='test_VrfFun_35_49_ipv4_routes') + + st.log('Configure max path iBGP for IPv6 is 2 in DUT1 and 8 in DUT2') + dict1 = {'vrf_name':vrf_name[1],'local_as': dut1_as[1], 'max_path_ibgp': 2, 'config_type_list': ["max_path_ibgp"],'addr_family' : 'ipv6'} + dict2 = {'vrf_name':vrf_name[1],'local_as': dut2_as[1], 'max_path_ibgp': 8, 'config_type_list': ["max_path_ibgp"],'addr_family' : 'ipv6'} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + st.log('for BGPv4+ Configuring Vlan102 as another neighbor in VRF-102') + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'remote_as':dut2_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'remote_as':dut1_as[1],'addr_family':'ipv6','config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[1],'remote_as':dut2_as[1],'addr_family':'ipv6','config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[1],'remote_as':dut1_as[1],'addr_family':'ipv6','config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[1], local_as = dut1_as[1], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[1]) + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[1], local_as = dut1_as[1], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[0]) + if not loc_lib.retry_api(ip_api.verify_ip_route, dut = data.dut2, vrf_name = vrf_name[1], type='B', nexthop = 
dut1_dut2_vrf_ipv6[1], interface = 'Vlan'+dut2_dut1_vlan[1],family='ipv6'): + st.log('IPv6 routes on VRF-102, not learnt on DUT2') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Multipath IBGP verification for IPv4 and IPv6 in VRF-102 failed') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_10_12_14(request,prologue_epilogue): + yield + st.log('######------Delete the static routes configured on all VRFs------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[0], dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[0], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[0], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[0],'no']]) + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[0], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[0], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[0],'no']]) + + ############################################################################################################################### + + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[1], dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[1], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[1], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[1],'no']]) + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[1], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[1], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[1], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[1],'no']]) + + ############################################################################################################################### + + 
utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[2], dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[2], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[2], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[2],'no']]) + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[2], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[2], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[2], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[2],'no']]) + + ############################################################################################################################### + + loc_lib.dut_vrf_bgp(phy = '1') + loc_lib.dut_vrf_bgp(ve = '1') + loc_lib.dut_vrf_bgp(pc = '1') + loc_lib.tg_vrf_bgp(phy = '1') + loc_lib.tg_vrf_bgp(ve = '1') + loc_lib.tg_vrf_bgp(pc = '1') + +@pytest.mark.functionality +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_10_12_14(vrf_fixture_tc_10_12_14): + + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun0010: Add/delete static route under vrf with next hop as physical interface-----######') + st.log('######------FtRtVrfFun0012: Add/delete static route under vrf with next hop as virtual interface -----######') + st.log('######------FtRtVrfFun0014: Add/delete static route under vrf with next hop as port channel -----######') + st.log('#######################################################################################################################') + + result = 0 + loc_lib.dut_vrf_bgp(phy = '1', config = 'no') + loc_lib.dut_vrf_bgp(ve = '1', config = 'no') + loc_lib.dut_vrf_bgp(pc = '1', config = 'no') + st.log('######------Configure IPv4 static routes on VRF-101------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[0], 
dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[0], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[0], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[0],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ip[0], interface= vrf_name[0], count = 2): + st.log('IPv4 Ping from Vrf-101-DUT1 to Vrf-101-DUT2 failed after static route configuration') + result += 1 + st.log('######------Configure IPv6 static routes on VRF-101------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[0], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[0], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[0],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ipv6[0], interface= vrf_name[0], count = 2, family='ipv6'): + st.log('IPv6 Ping from Vrf-101-DUT1 to Vrf-101-DUT2 failed after static route configuration') + result += 1 + st.log('######------Configure IPv4 static routes on VRF-102------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[1], dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[1], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[1], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[1],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ip[1], interface= vrf_name[1], count = 2): + st.log('IPv4 Ping from Vrf-102-DUT1 to Vrf-102-DUT2 failed after static route configuration') + result += 1 + st.log('######------Configure IPv6 static routes on VRF-102------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[1], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[1], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[1], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[1],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ipv6[1], family='ipv6', 
interface= vrf_name[1], count = 2): + st.log('IPv6 Ping from Vrf-102-DUT1 to Vrf-102-DUT2 failed after static route configuration') + result += 1 + st.log('######------Configure IPv4 static routes on VRF-103------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ip[2], dut2_tg1_vrf_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[2], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ip[2], dut1_tg1_vrf_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[2],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ip[2], interface= vrf_name[2], count = 2): + st.log('IPv4 Ping from Vrf-103-DUT1 to Vrf-103-DUT2 failed after static route configuration') + result += 1 + st.log('######------Configure IPv6 static routes on VRF-103------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_tg1_vrf_ipv6[2], dut2_tg1_vrf_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[2], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_tg1_vrf_ipv6[2], dut1_tg1_vrf_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[2],'']]) + if not ip_api.ping(data.dut1, dut2_tg1_vrf_ipv6[2], family='ipv6', interface= vrf_name[2], count = 2): + st.log('IPv6 Ping from Vrf-103-DUT1 to Vrf-103-DUT2 failed after static route configuration') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Static route between VRFs failed for VRf-101, VRF-102 and VRF-103') + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_29_30_41_42_54_55(request,prologue_epilogue): + yield + st.log('######------Delete Loopback as BGP neighbor------######') + dict1 = {'vrf_name':vrf_name[1],'local_as':'104','config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + dict2 = {'vrf_name':vrf_name[1],'local_as':'105','config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + 
utils.exec_all(True,[[ip_api.config_static_route_vrf, data.dut1, dut2_loopback_ip[1], dut2_loopback_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[1], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_loopback_ip[1], dut1_loopback_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[1],'no']]) +    dict1 = {'vrf_name':vrf_name[2],'local_as':'106','config':'no','removeBGP':'yes','config_type_list':['removeBGP']} +    dict2 = {'vrf_name':vrf_name[2],'local_as':'107','config':'no','removeBGP':'yes','config_type_list':['removeBGP']} +    parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) +    utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_loopback_ipv6[2], dut2_loopback_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[2], 'no'], [ip_api.config_static_route_vrf,data.dut2, dut1_loopback_ipv6[2], dut1_loopback_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[2],'no']]) +    loc_lib.dut_vrf_bgp(ve = '1') +    loc_lib.dut_vrf_bgp(pc = '1') +    loc_lib.tg_vrf_bgp(ve = '1') +    loc_lib.tg_vrf_bgp(pc = '1') + +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_29_30_41_42_54_55(vrf_fixture_tc_29_30_41_42_54_55): + +    st.log('#######################################################################################################################') +    st.log('######------FtRtVrfFun029: Verify multihop EBGP under IPv4 address-family in non-default vrf-----######') +    st.log('######------FtRtVrfFun030: Add/delete non-default vrf with both single hop and multihop EBGP sessions-----######') +    st.log('######------FtRtVrfFun041: Verify multihop EBGP under IPv6 address-family in non-default vrf-----######') +    st.log('######------FtRtVrfFun042: Add/delete non-default vrf with both single hop and multihop EBGP sessions for IPv6 address family-----######') +    st.log('######------FtRtVrfFun054: IPv4 forwarding with default route in default vrf and also in non-default vrf-----######') +    st.log('######------FtRtVrfFun055: IPv6 forwarding with 
default route in default vrf and also in non-default vrf-----######') +    st.log('#######################################################################################################################') + +    result = 0 +    loc_lib.dut_vrf_bgp(ve = '1', config = 'no') +    st.log('######------Configure EBGP between DUTs------######') +    st.log('######------Add Loopback as BGP neighbor------######') +    dict1 = {'vrf_name':vrf_name[1],'local_as':'104','neighbor':dut2_loopback_ip[1],'remote_as':'105','config_type_list':['neighbor']} +    dict2 = {'vrf_name':vrf_name[1],'local_as':'105','neighbor':dut1_loopback_ip[1],'remote_as':'104','config_type_list':['neighbor']} +    parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) +    dict1 = {'vrf_name':vrf_name[1],'local_as':'104','neighbor':dut2_loopback_ip[1],'remote_as':'105','config_type_list':['activate','update_src','ebgp_mhop'],'update_src':dut1_loopback_ip[1],'ebgp_mhop':'1'} +    dict2 = {'vrf_name':vrf_name[1],'local_as':'105','neighbor':dut1_loopback_ip[1],'remote_as':'104','config_type_list':['activate','update_src','ebgp_mhop'],'update_src':dut2_loopback_ip[1],'ebgp_mhop':'1'} +    parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) +    st.log('######------Configure static routes on VRF-102------######') +    utils.exec_all(True,[[ip_api.config_static_route_vrf, data.dut1, dut2_loopback_ip[1], dut2_loopback_ip_subnet, dut2_dut1_vrf_ip[0],'ipv4',vrf_name[1], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_loopback_ip[1], dut1_loopback_ip_subnet, dut1_dut2_vrf_ip[0],'ipv4',vrf_name[1],'']]) +    if not ip_api.ping(data.dut1, dut2_loopback_ip[1], interface= vrf_name[1], count = 2): +        st.log('IPv4 Ping from Vrf-102-DUT1 to Vrf-102-DUT2 failed after static route configuration to loopback interface') +        result += 1 +    if not ip_bgp.verify_bgp_neighbor(data.dut1, neighborip = dut2_loopback_ip[1], state='Established', vrf = vrf_name[1]): +        st.log('IPv4 routes on VRF-102 over 
the loopback, not learnt on DUT2') + result += 1 + dict1 = {'vrf_name':vrf_name[1],'local_as':'104','config_type_list':['redist'],'redistribute':'connected'} + dict2 = {'vrf_name':vrf_name[1],'local_as':'105','config_type_list':['redist'],'redistribute':'connected'} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + loc_lib.dut_vrf_bgp(pc = '1', config = 'no') + st.log('######------Add Loopback as BGP neighbor for vrf-103------######') + dict1 = {'vrf_name':vrf_name[2],'local_as':'106','neighbor':dut2_loopback_ipv6[2],'remote_as':'107','addr_family':'ipv6','config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[2],'local_as':'107','neighbor':dut1_loopback_ipv6[2],'remote_as':'106','addr_family':'ipv6','config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[2],'local_as':'106','neighbor':dut2_loopback_ipv6[2],'remote_as':'107','addr_family':'ipv6','config_type_list':['activate','update_src','ebgp_mhop'],'update_src':dut1_loopback_ipv6[2],'ebgp_mhop':'1'} + dict2 = {'vrf_name':vrf_name[2],'local_as':'107','neighbor':dut1_loopback_ipv6[2],'remote_as':'106','addr_family':'ipv6','config_type_list':['activate','update_src','ebgp_mhop'],'update_src':dut2_loopback_ipv6[2],'ebgp_mhop':'1'} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + st.log('######------Configure static routes for vrf-103------######') + utils.exec_all(True,[[ip_api.config_static_route_vrf,data.dut1, dut2_loopback_ipv6[2], dut2_loopback_ipv6_subnet, dut2_dut1_vrf_ipv6[0],'ipv6',vrf_name[2], ''], [ip_api.config_static_route_vrf,data.dut2, dut1_loopback_ipv6[2], dut1_loopback_ipv6_subnet, dut1_dut2_vrf_ipv6[0],'ipv6',vrf_name[2],'']]) + if not ip_api.ping(data.dut1, dut2_loopback_ipv6[2], interface= vrf_name[2], family='ipv6', count = 2): + st.log('IPv6 Ping from Vrf-102-DUT1 to Vrf-102-DUT2 failed after static 
route configuration to loopback interface') + result += 1 + if not ip_bgp.verify_bgp_neighbor(data.dut1, neighborip = dut2_loopback_ipv6[2], state='Established', vrf = vrf_name[2]): + st.log('IPv4 routes on VRF-102 over the loopback, not learnt on DUT2') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Static route between VRFs failed for VRf-101, VRF-102 and VRF-103') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') + +@pytest.fixture(scope="function") +def vrf_fixture_tc_20_24_25_32_33_44_45(request,prologue_epilogue): + yield + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut2_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'config':'no','vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut1_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + ############################################################################################################################### + + dict1 = {'config':'no','vrf_name':vrf_name[2],'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = 
{'config':'no','vrf_name':vrf_name[2],'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'config':'no','vrf_name':vrf_name[2],'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut2_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'config':'no','vrf_name':vrf_name[2],'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut1_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + #dict1 = {'vrf_name':'default','local_as':dut1_as[1],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + #dict2 = {'vrf_name':'default','local_as':dut2_as[1],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + #parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + ############################################################################################################################### + #port_api.noshutdown(data.dut1, ['Vlan2','Vlan3']) + +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_20_24_25_32_33_44_45(vrf_fixture_tc_20_24_25_32_33_44_45): + + st.log('#######################################################################################################################') + st.log('######------FtRtVrfFun020: IPv4 static route leak from non-default vrf to another non-default vrf-----######') + st.log('######------FtRtVrfFun024: IPv6 static route leak from non-default vrf to another non-default vrf-----######') + st.log('######------FtRtVrfFun025: Import same route from VRF A to VRF B, C and D -----######') + 
st.log('#######################################################################################################################') + + result = 0 + #port_api.shutdown(data.dut1, ['Vlan2','Vlan3']) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut2_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut1_as[1],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + #import_interface = data.d2_dut_ports[0]+'(vrf '+vrf_name[0]+')' + ip_api.show_ip_route(data.dut2, family="ipv4", shell="sonic", vrf_name=vrf_name[1]) + ip_api.show_ip_route(data.dut2, family="ipv6", shell="sonic", vrf_name=vrf_name[1]) + dict1 = {'vrf_name':vrf_name[2],'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'vrf_name':vrf_name[2],'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = 
{'vrf_name':vrf_name[2],'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut2_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + dict2 = {'vrf_name':vrf_name[2],'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ipv6[0],'addr_family':'ipv6','remote_as':dut1_as[2],'config_type_list':['import_vrf'],'import_vrf_name':vrf_name[0]} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + #import_interface = data.d2_dut_ports[0]+'(vrf '+vrf_name[0]+')' + ip_api.show_ip_route(data.dut2, family="ipv4", shell="sonic", vrf_name=vrf_name[2]) + ip_api.show_ip_route(data.dut2, family="ipv6", shell="sonic", vrf_name=vrf_name[2]) + if not loc_lib.verify_bgp(ve = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.verify_bgp(ve = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.verify_bgp(pc = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-103 did not come up') + result += 1 + if not loc_lib.verify_bgp(pc = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-103 did not come up') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Static route between VRFs failed for VRf-101, VRF-102 and VRF-103') + #debug_bgp_vrf() + st.report_fail('test_case_failed') + +#@pytest.mark.depends('test_VrfFun001_06') +def test_VrfFun_05_50(): + + st.log('######################################################################################################################') + st.log('######------FtRtVrfFun005 Configure overlapping IP addresses belonging to different VRFs -----######') + st.log('######------FtRtVrfFun050 Verify non-default vrf after cold reboot -----######') + st.log('#######################################################################################################################') + + result = 0 + if not loc_lib.retry_api(loc_lib.verify_bgp, 
ve = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, ve = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, pc = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-103 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, pc = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-103 did not come up') + result += 1 + reboot_api.config_save(data.dut1) + reboot_api.config_save(data.dut1,shell='vtysh') + st.reboot(data.dut1, 'fast') + st.log('Waiting for the sessions to come up') + st.wait(40) + if not loc_lib.retry_api(loc_lib.verify_bgp, ve = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, ve = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-102 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, pc = '1',ip = 'ipv4'): + st.log('IPv4 BGP session on VRF-103 did not come up') + result += 1 + if not loc_lib.retry_api(loc_lib.verify_bgp, pc = '1',ip = 'ipv6'): + st.log('IPv6 BGP session on VRF-103 did not come up') + result += 1 + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Save and reload with VRF configuration failed') + loc_lib.debug_bgp_vrf() + st.report_fail('test_case_failed') diff --git a/spytest/tests/routing/VRF/test_vrf_scale.py b/spytest/tests/routing/VRF/test_vrf_scale.py new file mode 100644 index 00000000000..59afc154597 --- /dev/null +++ b/spytest/tests/routing/VRF/test_vrf_scale.py @@ -0,0 +1,376 @@ +############################################################################## + +#Script Title : VRF Lite scale +#Author : Manisha Joshi +#Mail-id : manisha.joshi@broadcom.com + +############################################################################### + +import pytest +import os +import ipaddress + +from spytest 
import st,utils + +from vrf_vars import * #all the variables used for vrf testcases +from vrf_vars import data +import vrf_lib as loc_lib +from utilities import parallel + +import apis.switching.mac as mac_api +import apis.switching.vlan as vlan_api + +import apis.routing.ip as ip_api +import apis.routing.vrf as vrf_api +import apis.routing.bgp as bgp_api +import apis.routing.ip_bgp as ip_bgp + +import apis.system.port as port_api +import apis.system.reboot as reboot_api +import apis.system.basic as basic_obj + +from spytest.tgen.tg import tgen_obj_dict +from spytest.tgen.tgen_utils import validate_tgen_traffic + + +#Topology: +#------#TG#----(2links)----#DUT1#----(4links)----#DUT2#----(2links)-----#TG#-------# + +def initialize_topology(): +# code for ensuring min topology + st.log("Initialize.............................................................................................................") + vars = st.ensure_min_topology("D1D2:4", "D1T1:2", "D2T1:2") + data.dut_list = st.get_dut_names() + data.dut1 = data.dut_list[0] + data.dut2 = data.dut_list[1] + utils.exec_all(True,[[bgp_api.enable_docker_routing_config_mode,data.dut1], [bgp_api.enable_docker_routing_config_mode,data.dut2]]) + data.d1_dut_ports = [vars.D1D2P1] + data.d2_dut_ports = [vars.D2D1P1] + data.dut1_tg1_ports = [vars.D1T1P1] + data.dut2_tg1_ports = [vars.D2T1P1] + data.tg_dut1_hw_port = vars.T1D1P1 + data.tg_dut2_hw_port = vars.T1D2P1 + data.tg1 = tgen_obj_dict[vars['tgen_list'][0]] + data.tg2 = tgen_obj_dict[vars['tgen_list'][0]] + data.tg_dut1_p1 = data.tg1.get_port_handle(vars.T1D1P1) + data.tg_dut2_p1 = data.tg2.get_port_handle(vars.T1D2P1) + data.d1_p1_intf_v4 = {} + data.d1_p1_intf_v6 = {} + data.d2_p1_intf_v4 = {} + data.d2_p1_intf_v6 = {} + data.stream_list_scale = {} + data.stream = [] + data.dut1_dut2_ip_list = ip_range('5.0.0.1',2,1000) + data.dut2_dut1_ip_list = ip_range('5.0.0.2',2,1000) + data.dut1_tg_host = '6.0.0.1' + data.tg_dut1_host = '6.0.0.2' + data.dut2_tg_host = 
'7.0.0.1' + data.tg_dut2_host = '7.0.0.2' + data.tg_dut1_stream_start = ip_range('6.0.0.3',3,1000) + data.tg_dut2_stream_start = ip_range('7.0.0.3',3,1000) + data.intf_list = [] + for vlan in dut1_dut2_vlan_scale: + data.intf_list.append('Vlan'+vlan) + +@pytest.fixture(scope='module', autouse = True) +def prologue_epilogue(): + initialize_topology() + #import pdb; pdb.set_trace() + #import code; code.interact(local=globals()) + base_config() + yield + base_unconfig() + +def base_config(): + ############################################################################################################################### + + st.log('###### ----- Taking backup for unconfig ------######') + src_path = "/etc/sonic/config_db.json" + dst_path = "/etc/sonic/default.json" + #cmd = 'cp /etc/sonic/config_db.json /etc/sonic/default.json' + utils.exec_all(True,[[basic_obj.copy_file_to_local_path,data.dut1,src_path,dst_path], [basic_obj.copy_file_to_local_path,data.dut2, src_path, dst_path]]) + ############################################################################################################################### + + st.log('###### ----- Loading json file with vrf and IP address config ------######') + curr_path = os.getcwd() + json_file_dut1 = curr_path+"/routing/VRF/vrf_scale_dut1.json" + st.apply_files(data.dut1, [json_file_dut1]) + + json_file_dut2 = curr_path+"/routing/VRF/vrf_scale_dut2.json" + st.apply_files(data.dut2, [json_file_dut2]) + + utils.exec_all(True,[[st.apply_files,data.dut1,[json_file_dut1]], [st.apply_files,data.dut2,[json_file_dut2]]]) + + st.log('######------Configure vlans and add members------######') + utils.exec_all(True,[[vlan_api.config_vlan_range,data.dut1,'1 999','add'], [vlan_api.config_vlan_range,data.dut2,'1 999','add']]) + utils.exec_all(True,[[vlan_api.config_vlan_range_members,data.dut1,'1 999',data.d1_dut_ports[0],'add'], [vlan_api.config_vlan_range_members,data.dut2,'1 999',data.d2_dut_ports[0],'add']]) + + 
############################################################################################################################### + + st.log('######----- Configure IP on DUT--TG interface------######') + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.dut1_tg1_ports[0],data.dut1_tg_host,'16','ipv4'], [ip_api.config_ip_addr_interface,data.dut2,data.dut2_tg1_ports[0],data.dut2_tg_host,'16','ipv4']]) + + ############################################################################################################################### + + st.log('######----- Configure hosts and create traffic streams for all VRFs------######') + host_config() + gateway_mac = mac_api.get_sbin_intf_mac(data.dut1, data.dut1_tg1_ports[0]) + + data.stream = data.tg1.tg_traffic_config(port_handle = data.tg_dut1_p1, mode = 'create', duration = '5', transmit_mode = 'continuous', length_mode = 'fixed', port_handle2 = data.tg_dut2_p1, rate_pps = 1000, mac_src = '00.00.00.11.12.53', mac_dst = gateway_mac, ip_src_addr = data.tg_dut1_stream_start[0], ip_dst_addr=data.tg_dut2_stream_start[0], l3_protocol='ipv4',ip_src_mode = 'increment', ip_src_count = 1000, ip_src_step ='0.0.0.1') + data.stream_list_scale.update({'pc_v4_stream':data.stream['stream_id']}) + st.wait(30) + +def base_unconfig(): + ############################################################################################################################### + + st.log('######------Unconfigure static routes on 900 VRFs-----######') + for vrf,dut1_as,dut2_as in zip(vrf_list[899:1000],dut1_as_scale[0:100],dut2_as_scale[0:100]): + dict1 = {'vrf_name':vrf,'local_as':dut1_as,'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + dict2 = {'vrf_name':vrf,'local_as':dut2_as,'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + 
############################################################################################################################### + + st.log('######------Unconfigure static routes on 900 VRFs-----######') + dict1 = {'dest_list':data.tg_dut2_stream_start[0:425],'next_hop_list':data.dut2_dut1_ip_list[0:425],'vrf_list':vrf_list[0:425],'config':'no'} + dict2 = {'dest_list':data.tg_dut2_stream_start[0:425],'vrf_list':vrf_list[0:425],'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_static_route, [dict1, dict2]) + st.wait(30) + + st.log('######----- UnConfigure IP on DUT--TG interface------######') + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.dut1_tg1_ports[0],data.dut1_tg_host,'16','ipv4','remove'], [ip_api.config_ip_addr_interface,data.dut2,data.dut2_tg1_ports[0],data.dut2_tg_host,'16','ipv4','remove']]) + + dict1 = {'dest_list':data.tg_dut2_stream_start[425:899],'next_hop_list':data.dut2_dut1_ip_list[425:899],'vrf_list':vrf_list[425:899],'config':'no'} + dict2 = {'dest_list':data.tg_dut2_stream_start[425:899],'vrf_list':vrf_list[425:899],'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_static_route, [dict1, dict2]) + st.wait(30) + + ############################################################################################################################### + + st.log('###### ----- Laoding back the config_db file ------######') + #cmd = 'cp /etc/sonic/default.json /etc/sonic/config_db.json' + src_path = "/etc/sonic/default.json" + dst_path = "/etc/sonic/config_db.json" + utils.exec_all(True,[[basic_obj.copy_file_to_local_path,data.dut1,src_path,dst_path], [basic_obj.copy_file_to_local_path,data.dut2, src_path, dst_path]]) + utils.exec_all(True,[[st.reboot,data.dut1,'fast'], [st.reboot,data.dut2,'fast']]) + + ############################################################################################################################### + + host_config(config = 'no') + 
+@pytest.fixture(scope="function") +def vrf_fixture_vrf_scale(request,prologue_epilogue): + yield + dict1 = {'vrf_name':['Vrf-red'],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.config_vrf, [dict1, dict1]) + +def test_vrf_scale(vrf_fixture_vrf_scale): + + result = 0 + ############################################################################################################################### + + if not vrf_api.verify_vrf(data.dut2, vrfname = vrf_list): + st.log('VRF creation failed on DUT2') + result += 1 + + ############################################################################################################################### + + st.log('######------Flap the underlying interface and reverify the VRF-----######') + port_api.shutdown(data.dut1, data.d1_dut_ports) + port_api.noshutdown(data.dut1, data.d1_dut_ports) + + st.wait(5) + + ############################################################################################################################### + + if not vrf_api.verify_vrf(data.dut1, vrfname = vrf_list): + st.log('Binding of VRF to interfaces failed on DUT1') + result += 1 + + ############################################################################################################################### + + if result == 0 : + st.report_pass('test_case_passed') + else: + st.log('Interface binding failed for 1000 VRFs') + st.report_fail('test_case_failed') + +def test_vrf_route_leak(): + + result = 0 + ############################################################################################################################### + + st.log('######------Configure static routes on 900 VRFs-----######') + dict1 = {'dest_list':data.tg_dut2_stream_start[0:425],'next_hop_list':data.dut2_dut1_ip_list[0:425],'vrf_list':vrf_list[0:425]} + dict2 = {'dest_list':data.tg_dut2_stream_start[0:425],'vrf_list':vrf_list[0:425]} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_static_route, [dict1, 
dict2]) + st.wait(30) + + dict1 = {'dest_list':data.tg_dut2_stream_start[425:899],'next_hop_list':data.dut2_dut1_ip_list[425:899],'vrf_list':vrf_list[425:899]} + dict2 = {'dest_list':data.tg_dut2_stream_start[425:899],'vrf_list':vrf_list[425:899]} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_static_route, [dict1, dict2]) + st.wait(30) + + ############################################################################################################################### + + loc_lib.clear_tg() + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list_scale.values(), duration = 5) + + traffic_details = {'1': {'tx_ports' : [data.tg_dut1_hw_port],'tx_obj' : [data.tg1],'exp_ratio' : [1],'rx_ports' : [data.tg_dut2_hw_port],'rx_obj' : [data.tg2]}} + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list_scale.values()) + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='aggregate', comp_type='packet_count') + + if not aggrResult: + st.log('IPv4 Traffic on 1000 VRF with route leak failed') + result += 1 + + ############################################################################################################################### + + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Traffic on VRF with static route leak failed') + st.report_fail('test_case_failed') + +def test_vrf_bgp(): + result = 0 + ############################################################################################################################### + + st.log('######------Configure BGP on 100 VRFs-----######') + for vrf, dut1_ip, dut2_ip, dut1_as, dut2_as in zip(vrf_list[899:1000], data.dut1_dut2_ip_list[899:1000], data.dut2_dut1_ip_list[899:1000], dut1_as_scale[0:100], dut2_as_scale[0:100]): + dict1 = {'vrf_name': vrf, 'router_id': dut1_router_id, 'local_as': dut1_as, 'neighbor': dut2_ip, 'remote_as': dut2_as, 'config_type_list': ['neighbor']} + dict2 = {'vrf_name': vrf, 'router_id': 
dut2_router_id, 'local_as': dut2_as, 'neighbor': dut1_ip, 'remote_as': dut1_as, 'config_type_list': ['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + dict1 = {'vrf_name': vrf, 'local_as': dut1_as, 'neighbor': dut2_ip, 'remote_as': dut2_as, 'connect': '3', 'config_type_list': ['activate', 'nexthop_self', 'connect']} + dict2 = {'vrf_name': vrf, 'local_as': dut2_as, 'neighbor': dut1_ip, 'remote_as': dut1_as, 'connect': '3', 'config_type_list': ['activate', 'nexthop_self', 'connect']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + ############################################################################################################################### + + st.log('######------Verify the BGP neighbors have come up -----######') + if not utils.poll_wait(ip_bgp.verify_bgp_neighbor, 60, data.dut1, neighborip=data.dut2_dut1_ip_list[899], state='Established', vrf=vrf_list[899]): + st.log('IPv4 BGP session on VRF-899 did not come up') + result += 1 + + if not utils.poll_wait(ip_bgp.verify_bgp_neighbor, 60, data.dut1, neighborip=data.dut2_dut1_ip_list[950], state='Established', vrf=vrf_list[950]): + st.log('IPv4 BGP session on VRF-950 did not come up') + result += 1 + + ############################################################################################################################### + + st.log('######------Clear BGP and reverify -----######') + bgp_api.clear_ip_bgp_vrf_vtysh(data.dut1, vrf_list[899], family='ipv4') + st.log('######------Time taken for BGP to come up after clear -----######') + st.wait(10) + if not utils.poll_wait(ip_bgp.verify_bgp_neighbor, 60, data.dut1, neighborip=data.dut2_dut1_ip_list[899], state='Established', vrf=vrf_list[899]): + st.log('IPv4 BGP session on VRF-899 did not come up') + result += 1 + + ############################################################################################################################### + + if 
result == 0: + st.report_pass('test_case_passed') + else: + st.log('BGP neighborship on 100 VRFs failed') + st.report_fail('test_case_failed') + +def test_vrf_reload(): + result = 0 + ############################################################################################################################### + + reboot_api.config_save(data.dut1) + st.vtysh(data.dut1,"copy running startup") + st.reboot(data.dut1, 'fast') + + ############################################################################################################################### + + if not vrf_api.verify_vrf(data.dut1, vrfname = vrf_list): + st.log('Binding of VRF to interfaces failed on DUT1') + result += 1 + + ############################################################################################################################### + + if result == 0: + st.report_pass('test_case_passed') + else: + st.log('Save and reload with VRF configuration failed') + st.report_fail('test_case_failed') + +def ipaddresslist(): + st.log('######----- Generate 1000 IPs between the DUTs ------######') + start = ipaddress.IPv4Address(u'1.1.1.1') + end = ipaddress.IPv4Address(u'1.1.4.231') + ipaddress_list = [start] + temp = start + while temp != end: + temp += 1 + ipaddress_list.append(temp) + + return ipaddress_list + +def ip_incr(ip,octet): + ip_list = ip.split(".") + ip_list[octet] = str(int(ip_list[octet]) + 1) + return '.'.join(ip_list) + +def ip_range(ip,octet,scl): + ip_list = [ip] + ip2 = ip + i = 1 + j = int(ip.split(".")[octet]) + while i < scl: + if j == 255: + ip = ip_incr(ip, octet-1) + j = int(ip.split(".")[octet]) + ip2 = ip + ip_list.append(ip2) + i += 1 + else: + ip2 = ip_incr(ip2, octet) + ip_list.append(ip2) + i += 1 + j += 1 + return ip_list + +def vrf_static_route(dut,**kwargs): + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + if 'next_hop_list' in kwargs: + next_hop_list = kwargs['next_hop_list'] + if 'vrf_list' in kwargs: + vrf_list = kwargs['vrf_list'] + 
if 'dest_list' in kwargs: + dest_list = kwargs['dest_list'] + my_cmd = '' + if dut == data.dut1: + for dest,vrf,next_hop in zip(dest_list,vrf_list,next_hop_list): + my_cmd += '{} ip route {}/32 {} nexthop-vrf {} \n'.format(config,dest,next_hop,vrf) + if dut == data.dut2: + for dest,vrf in zip(dest_list,vrf_list): + my_cmd += '{} ip route {}/32 7.0.0.2 nexthop-vrf default vrf {} \n'.format(config,dest,vrf) + st.vtysh_config(dut,my_cmd) + return True + +def host_config(**kwargs): + if 'config' in kwargs: + config = kwargs['config'] + else: + config = 'yes' + if config.lower() == 'yes': + st.log('######----- Configure host on TG for DUT1 and DUT2 ------######') + intf_hand_v4 = data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, mode ='config',intf_ip_addr = data.tg_dut1_host, gateway = data.dut1_tg_host, netmask = '255.255.0.0',arp_send_req = '1') + data.d1_p1_intf_v4.update({data.tg_dut1_host:intf_hand_v4}) + intf_hand_v4 = data.tg2.tg_interface_config(port_handle = data.tg_dut2_p1, mode ='config',intf_ip_addr = data.tg_dut2_host, gateway = data.dut2_tg_host, netmask = '255.255.0.0',arp_send_req = '1') + data.d2_p1_intf_v4.update({data.tg_dut2_host:intf_hand_v4}) + else: + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v4.get(data.tg_dut1_host)['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v4.get(data.tg_dut2_host)['handle'], mode='destroy') + +# frr_path = os.getcwd() + # apply_file = True + # res1 = True + # frr_apply_path = frr_path+"/routing/frr.frr" + # result = st.apply_files(vars.D1, [frr_apply_path]) + diff --git a/spytest/tests/routing/VRF/vrf_lib.py b/spytest/tests/routing/VRF/vrf_lib.py new file mode 100644 index 00000000000..df2189378d6 --- /dev/null +++ b/spytest/tests/routing/VRF/vrf_lib.py @@ -0,0 +1,859 @@ +################################################################################# +#Script Title : VRF Lite +#Author : Manisha Joshi 
+#Mail-id : manisha.joshi@broadcom.com + +################################################################################# + +from spytest import st,utils + +from vrf_vars import * #all the variables used for vrf testcases +from vrf_vars import data +from utilities import parallel + +import apis.switching.portchannel as pc_api +import apis.switching.vlan as vlan_api + +import apis.routing.ip as ip_api +import apis.routing.vrf as vrf_api +import apis.routing.bgp as bgp_api + +from spytest.tgen.tgen_utils import validate_tgen_traffic, tg_bgp_config + + +def vrf_base_config(): + vrf_config() + tg_vrf_bind() + dut_vrf_bind(phy = '1') + dut_vrf_bind(ve = '1') + dut_vrf_bind(pc = '1') + ip_api.config_route_map_global_nexthop(data.dut1, 'UseGlobal', type = 'next_hop_v6', config = 'yes') + ip_api.config_route_map_global_nexthop(data.dut2, 'UseGlobal', type = 'next_hop_v6', config = 'yes') + dut_vrf_bgp(phy = '1') + dut_vrf_bgp(ve = '1') + dut_vrf_bgp(pc = '1') + tg_vrf_bgp(phy = '1') + tg_vrf_bgp(ve = '1') + tg_vrf_bgp(pc = '1') + # import pdb; pdb.set_trace() + # import code; code.interact(local=globals()) + tg_interfaces(phy = '1') + tg_interfaces(ve = '1') + tg_interfaces(pc = '1') + start_arp_nd() + pump_bgp_routes(dut = data.dut1, ip = 'ipv4', vlan = dut1_tg1_vlan[0]) + pump_bgp_routes(dut = data.dut1, ip = 'ipv6', vlan = dut1_tg1_vlan[0]) + pump_bgp_routes(dut = data.dut1, ip = 'ipv4', vlan = dut1_tg1_vlan[1]) + pump_bgp_routes(dut = data.dut1, ip = 'ipv6', vlan = dut1_tg1_vlan[1]) + pump_bgp_routes(dut = data.dut1, ip = 'ipv4', vlan = dut1_tg1_vlan[2]) + pump_bgp_routes(dut = data.dut1, ip = 'ipv6', vlan = dut1_tg1_vlan[2]) + create_streams_all_vrf(todut = data.dut1) + +def vrf_base_unconfig(): + ip_api.config_route_map_global_nexthop(data.dut1, 'UseGlobal', type = 'next_hop_v6', config = 'no') + ip_api.config_route_map_global_nexthop(data.dut2, 'UseGlobal', type = 'next_hop_v6', config = 'no') + dut_vrf_bgp(phy = '1', config = 'no') + dut_vrf_bgp(ve = '1', config 
= 'no') + dut_vrf_bgp(pc = '1', config = 'no') + dut_vrf_bind(phy = '1', config = 'no') + dut_vrf_bind(ve = '1', config = 'no') + dut_vrf_bind(pc = '1', config = 'no') + tg_vrf_bind(config = 'no') + vrf_config(config = 'no') + + data.tg1.tg_traffic_control(action = 'reset', port_handle = data.tg_dut1_p1) + data.tg2.tg_traffic_control(action = 'reset', port_handle = data.tg_dut2_p1) + + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v4.get('1')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v4.get('2')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v4.get('3')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v6.get('1')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v6.get('2')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, handle=data.d1_p1_intf_v6.get('3')['handle'], mode='destroy') + + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v4.get('6')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v4.get('7')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v4.get('8')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v6.get('6')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v6.get('7')['handle'], mode='destroy') + data.tg1.tg_interface_config(port_handle = data.tg_dut2_p1, handle=data.d2_p1_intf_v6.get('8')['handle'], mode='destroy') + +def debug_bgp_vrf(): + st.log("Dubug commands starts!") + cmd_list = ['show ip route vrf Vrf-101','show ip route vrf 
Vrf-102','show ip route vrf Vrf-103','show ipv6 route vrf Vrf-101','show ipv6 route vrf Vrf-102','show ipv6 route vrf Vrf-103','show arp','show ndp'] + utils.exec_all(True, [[st.apply_script, data.dut1, cmd_list], [st.apply_script, data.dut2, cmd_list]]) + st.log(" End of Dubug commands") + +def vrf_config(**kwargs): + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + if config == '': + st.log('######------Configure vrfs ------######') + for vrf in vrf_name[0:3]: + dict1 = {'vrf_name':vrf,'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.config_vrf, [dict1, dict1]) + else: + st.log('######------Unconfigure vrfs ------######') + for vrf in vrf_name[0:3]: + dict1 = {'vrf_name':vrf,'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.config_vrf, [dict1, dict1]) + +def tg_vrf_bind(**kwargs): + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + if config == '': + st.log('######------Configure vlans on the PE--CE side -------######') + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_tg1_vlan[0]], [vlan_api.create_vlan, data.dut2, dut2_tg1_vlan[0]]]) + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_tg1_vlan[1]], [vlan_api.create_vlan, data.dut2, dut2_tg1_vlan[1]]]) + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_tg1_vlan[2]], [vlan_api.create_vlan, data.dut2, dut2_tg1_vlan[2]]]) + + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_tg1_vlan[0],data.dut1_tg1_ports[0],True,True], [vlan_api.add_vlan_member,data.dut2,dut2_tg1_vlan[0],data.dut2_tg1_ports[0],True,True]]) + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_tg1_vlan[1],data.dut1_tg1_ports[0],True,True], [vlan_api.add_vlan_member,data.dut2,dut2_tg1_vlan[1],data.dut2_tg1_ports[0],True,True]]) + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_tg1_vlan[2],data.dut1_tg1_ports[0],True,True], 
[vlan_api.add_vlan_member,data.dut2,dut2_tg1_vlan[2],data.dut2_tg1_ports[0],True,True]]) + + st.log('######------Bind DUT1 <--> tg1 vlans to vrf------######') + dict1 = {'vrf_name':vrf_name[0], 'intf_name':'Vlan'+dut1_tg1_vlan[0],'skip_error':True} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':'Vlan'+dut2_tg1_vlan[0],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_tg1_vlan[1],'skip_error':True} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_tg1_vlan[1],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[2], 'intf_name':'Vlan'+dut1_tg1_vlan[2],'skip_error':True} + dict2 = {'vrf_name':vrf_name[2], 'intf_name':'Vlan'+dut2_tg1_vlan[2],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Assign v4 addresses to vrf bound tg vlans------######') + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ip[0],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ip[0], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[1],dut1_tg1_vrf_ip[1],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[1], dut2_tg1_vrf_ip[1], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[2],dut1_tg1_vrf_ip[2],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[2], dut2_tg1_vrf_ip[2], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + + st.log('######------Assign v6 addresses to vrf bound tg vlans------######') + 
utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ipv6[0],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[1],dut1_tg1_vrf_ipv6[1],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[1], dut2_tg1_vrf_ipv6[1], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_tg1_vlan[2],dut1_tg1_vrf_ipv6[2],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[2], dut2_tg1_vrf_ipv6[2], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + else: + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ip[0],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ip[0], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[1],dut1_tg1_vrf_ip[1],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[1], dut2_tg1_vrf_ip[1], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[2],dut1_tg1_vrf_ip[2],dut1_tg1_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[2], dut2_tg1_vrf_ip[2], dut2_tg1_vrf_ip_subnet, 'ipv4']]) + + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[0],dut1_tg1_vrf_ipv6[0],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[0], dut2_tg1_vrf_ipv6[0], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[1],dut1_tg1_vrf_ipv6[1],dut1_tg1_vrf_ipv6_subnet,'ipv6'], 
[ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[1], dut2_tg1_vrf_ipv6[1], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_tg1_vlan[2],dut1_tg1_vrf_ipv6[2],dut1_tg1_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_tg1_vlan[2], dut2_tg1_vrf_ipv6[2], dut2_tg1_vrf_ipv6_subnet, 'ipv6']]) + + st.log('######------Bind DUT1 <--> tg1 vlans to vrf------######') + dict1 = {'vrf_name':vrf_name[0], 'intf_name':'Vlan'+dut1_tg1_vlan[0],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':'Vlan'+dut2_tg1_vlan[0],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_tg1_vlan[1],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_tg1_vlan[1],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + dict1 = {'vrf_name':vrf_name[2], 'intf_name':'Vlan'+dut1_tg1_vlan[2],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[2], 'intf_name':'Vlan'+dut2_tg1_vlan[2],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Unconfigure vlans on the PE--CE side -------######') + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_tg1_vlan[0],data.dut1_tg1_ports[0], True], [vlan_api.delete_vlan_member,data.dut2,dut2_tg1_vlan[0],data.dut2_tg1_ports[0], True]]) + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_tg1_vlan[1],data.dut1_tg1_ports[0], True], [vlan_api.delete_vlan_member,data.dut2,dut2_tg1_vlan[1],data.dut2_tg1_ports[0], True]]) + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_tg1_vlan[2],data.dut1_tg1_ports[0], True], 
[vlan_api.delete_vlan_member,data.dut2,dut2_tg1_vlan[2],data.dut2_tg1_ports[0], True]]) + + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_tg1_vlan[0]], [vlan_api.delete_vlan, data.dut2, dut2_tg1_vlan[0]]]) + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_tg1_vlan[1]], [vlan_api.delete_vlan, data.dut2, dut2_tg1_vlan[1]]]) + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_tg1_vlan[2]], [vlan_api.delete_vlan, data.dut2, dut2_tg1_vlan[2]]]) + +def dut_vrf_bind(**kwargs): + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = kwargs['pc'] + else: + pc = '' + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if phy != '' and config == '': + st.log('######------Bind DUT1 <--> DUT2 one physical interface to vrf-101 and config v4 and v6 addresses------######') + dict1 = {'vrf_name':vrf_name[0], 'intf_name':data.d1_dut_ports[0],'skip_error':True} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':data.d2_dut_ports[0],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + st.log('######------Bind loopback101 to vrf------######') + dict1 = {'vrf_name':vrf_name[0], 'intf_name':dut1_loopback[0],'skip_error':True} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':dut2_loopback[0],'skip_error':True} + parallel.exec_parallel(True, 
[data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[0],dut1_loopback_ip[0],dut1_loopback_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[0],dut2_loopback_ip[0],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[0],dut1_loopback_ipv6[0],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[0],dut2_loopback_ipv6[0],dut2_loopback_ipv6_subnet,'ipv6']]) + elif phy != '' and config == 'no': + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,data.d1_dut_ports[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2,data.d2_dut_ports[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[0],dut1_loopback_ip[0],dut1_loopback_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[0],dut2_loopback_ip[0],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[0],dut1_loopback_ipv6[0],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[0],dut2_loopback_ipv6[0],dut2_loopback_ipv6_subnet,'ipv6']]) + + st.log('######------Unbind DUT1 <--> DUT2 one physical interface from vrf-101------######') + dict1 = {'vrf_name':vrf_name[0], 'intf_name':data.d1_dut_ports[0],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[0], 'intf_name':data.d2_dut_ports[0],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, 
[dict1, dict2]) + + st.log('######------Unbind loopback101 to vrf------######') + dict1 = {'vrf_name':vrf_name[0],'intf_name':dut1_loopback[0],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[0],'intf_name':dut2_loopback[0],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + if ve != '' and config == '': + st.log('######------Configure vlans on the PE--PE side - DUT1 -- DUT2------######') + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_dut2_vlan[0]], [vlan_api.create_vlan, data.dut2, dut2_dut1_vlan[0]]]) + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_dut2_vlan[1]], [vlan_api.create_vlan, data.dut2, dut2_dut1_vlan[1]]]) + utils.exec_all(True, [[vlan_api.create_vlan, data.dut1, dut1_dut2_vlan[2]], [vlan_api.create_vlan, data.dut2, dut2_dut1_vlan[2]]]) + + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_dut2_vlan[0],data.d1_dut_ports[1],True,True], [vlan_api.add_vlan_member,data.dut2,dut2_dut1_vlan[0],data.d2_dut_ports[1],True,True]]) + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_dut2_vlan[1],data.d1_dut_ports[1],True,True], [vlan_api.add_vlan_member,data.dut2,dut2_dut1_vlan[1],data.d2_dut_ports[1],True,True]]) + utils.exec_all(True,[[vlan_api.add_vlan_member,data.dut1,dut1_dut2_vlan[2],data.d1_dut_ports[1],True,True], [vlan_api.add_vlan_member,data.dut2,dut2_dut1_vlan[2],data.d2_dut_ports[1],True,True]]) + + st.log('######------Bind DUT1 <--> DUT2 vlans to vrf------######') + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[0],'skip_error':True} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_dut1_vlan[0],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[1],'skip_error':True} + dict2 = {'vrf_name':vrf_name[1], 
'intf_name':'Vlan'+dut2_dut1_vlan[1],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[2],'skip_error':True} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_dut1_vlan[2],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[1],dut1_dut2_vrf_ip[1],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[1], dut2_dut1_vrf_ip[1], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[2],dut1_dut2_vrf_ip[2],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[2], dut2_dut1_vrf_ip[2], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[1],dut1_dut2_vrf_ipv6[1],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[1], dut2_dut1_vrf_ipv6[1], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'Vlan'+dut1_dut2_vlan[2],dut1_dut2_vrf_ipv6[2],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 
'Vlan'+dut2_dut1_vlan[2], dut2_dut1_vrf_ipv6[2], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + st.log('######------Bind loopback102 to vrf------######') + dict1 = {'vrf_name':vrf_name[1], 'intf_name':dut1_loopback[1],'skip_error':True} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':dut2_loopback[1],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[1],dut1_loopback_ip[1],dut1_loopback_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[1],dut2_loopback_ip[1],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[1],dut1_loopback_ipv6[1],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[1],dut2_loopback_ipv6[1],dut2_loopback_ipv6_subnet,'ipv6']]) + elif ve != '' and config == 'no': + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[0],dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[0], dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[1],dut1_dut2_vrf_ip[1],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[1], dut2_dut1_vrf_ip[1], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[2],dut1_dut2_vrf_ip[2],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[2], dut2_dut1_vrf_ip[2], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[0],dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[0], dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 
'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[1],dut1_dut2_vrf_ipv6[1],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[1], dut2_dut1_vrf_ipv6[1], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'Vlan'+dut1_dut2_vlan[2],dut1_dut2_vrf_ipv6[2],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'Vlan'+dut2_dut1_vlan[2], dut2_dut1_vrf_ipv6[2], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[1],dut1_loopback_ip[1],dut1_loopback_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[1],dut2_loopback_ip[1],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[1],dut1_loopback_ipv6[1],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[1],dut2_loopback_ipv6[1],dut2_loopback_ipv6_subnet,'ipv6']]) + st.log('######------Unbind loopback102 to vrf------######') + dict1 = {'vrf_name':vrf_name[1],'intf_name':dut1_loopback[1],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[1],'intf_name':dut2_loopback[1],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Unbind DUT1 <--> DUT2 vlans to vrf, , assign v4 and v6 address------######') + st.log('######------Bind DUT1 <--> DUT2 vlans to vrf------######') + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[0],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_dut1_vlan[0],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[1],'skip_error':True,'config':'no'} + dict2 = 
{'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_dut1_vlan[1],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut1_dut2_vlan[2],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[1], 'intf_name':'Vlan'+dut2_dut1_vlan[2],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Delete vlans on the PE--PE side - DUT1 -- DUT2------######') + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_dut2_vlan[0],data.d1_dut_ports[1], True], [vlan_api.delete_vlan_member,data.dut2,dut2_dut1_vlan[0],data.d2_dut_ports[1], True]]) + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_dut2_vlan[1],data.d1_dut_ports[1], True], [vlan_api.delete_vlan_member,data.dut2,dut2_dut1_vlan[1],data.d2_dut_ports[1], True]]) + utils.exec_all(True,[[vlan_api.delete_vlan_member,data.dut1,dut1_dut2_vlan[2],data.d1_dut_ports[1], True], [vlan_api.delete_vlan_member,data.dut2,dut2_dut1_vlan[2],data.d2_dut_ports[1], True]]) + + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_dut2_vlan[0]], [vlan_api.delete_vlan, data.dut2, dut2_dut1_vlan[0]]]) + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_dut2_vlan[1]], [vlan_api.delete_vlan, data.dut2, dut2_dut1_vlan[1]]]) + utils.exec_all(True, [[vlan_api.delete_vlan, data.dut1, dut1_dut2_vlan[2]], [vlan_api.delete_vlan, data.dut2, dut2_dut1_vlan[2]]]) + + if pc != '' and config == '': + st.log('######------Create a port channel on the DUTs and add members------######') + utils.exec_all(True, [[pc_api.create_portchannel, data.dut1, 'PortChannel10'], [pc_api.create_portchannel, data.dut2, 'PortChannel10']]) + utils.exec_all(True, [[pc_api.add_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[2]], [pc_api.add_portchannel_member, data.dut2, 
'PortChannel10',data.d2_dut_ports[2]]]) + utils.exec_all(True, [[pc_api.add_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[3]], [pc_api.add_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[3]]]) + + st.log('######------Bind DUT1 <--> DUT2 port channel to vrf and config IP addresses------######') + dict1 = {'vrf_name':vrf_name[2], 'intf_name':'PortChannel10','skip_error':True} + dict2 = {'vrf_name':vrf_name[2], 'intf_name':'PortChannel10','skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + st.log('######------Bind loopback103 to vrf------######') + dict1 = {'vrf_name':vrf_name[2], 'intf_name':dut1_loopback[2],'skip_error':True} + dict2 = {'vrf_name':vrf_name[2], 'intf_name':dut2_loopback[2],'skip_error':True} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[2],dut1_loopback_ip[2],dut1_loopback_ip_subnet,'ipv4'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[2],dut2_loopback_ip[2],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.config_ip_addr_interface,data.dut1,dut1_loopback[2],dut1_loopback_ipv6[2],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.config_ip_addr_interface,data.dut2,dut2_loopback[2],dut2_loopback_ipv6[2],dut2_loopback_ipv6_subnet,'ipv6']]) + elif pc != '' and config == 'no': + 
utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ip[0],dut1_dut2_vrf_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ip[0], dut2_dut1_vrf_ip_subnet, 'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,'PortChannel10',dut1_dut2_vrf_ipv6[0],dut1_dut2_vrf_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2, 'PortChannel10', dut2_dut1_vrf_ipv6[0], dut2_dut1_vrf_ipv6_subnet, 'ipv6']]) + + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[2],dut1_loopback_ip[2],dut1_loopback_ip_subnet,'ipv4'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[2],dut2_loopback_ip[2],dut2_loopback_ip_subnet,'ipv4']]) + utils.exec_all(True,[[ip_api.delete_ip_interface,data.dut1,dut1_loopback[2],dut1_loopback_ipv6[2],dut1_loopback_ipv6_subnet,'ipv6'], [ip_api.delete_ip_interface,data.dut2,dut2_loopback[2],dut2_loopback_ipv6[2],dut2_loopback_ipv6_subnet,'ipv6']]) + + st.log('######------Unbind loopback103 to vrf------######') + dict1 = {'vrf_name':vrf_name[2],'intf_name':dut1_loopback[2],'skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[2],'intf_name':dut2_loopback[2],'skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Unbind DUT1 <--> DUT2 physical interfaces to vrf and config v4 and v6 addresses------######') + dict1 = {'vrf_name':vrf_name[2], 'intf_name':'PortChannel10','skip_error':True,'config':'no'} + dict2 = {'vrf_name':vrf_name[2], 'intf_name':'PortChannel10','skip_error':True,'config':'no'} + parallel.exec_parallel(True, [data.dut1, data.dut2], vrf_api.bind_vrf_interface, [dict1, dict2]) + + st.log('######------Delete the member port and port-channel------######') + utils.exec_all(True, [[pc_api.add_del_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[2],'del'], [pc_api.add_del_portchannel_member, data.dut2, 
'PortChannel10',data.d2_dut_ports[2],'del']]) + utils.exec_all(True, [[pc_api.add_del_portchannel_member, data.dut1, 'PortChannel10',data.d1_dut_ports[3],'del'], [pc_api.add_del_portchannel_member, data.dut2, 'PortChannel10',data.d2_dut_ports[3],'del']]) + utils.exec_all(True, [[pc_api.delete_portchannel, data.dut1, 'PortChannel10'], [pc_api.delete_portchannel, data.dut2, 'PortChannel10']]) + +def verify_vrf_bind(**kwargs): + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = kwargs['pc'] + else: + pc = '' + if phy != '': + result = vrf_api.verify_vrf_verbose(data.dut1, vrfname = vrf_name[0], interface = [data.d1_dut_ports[0],'Vlan1']) + if result is False: + st.report_fail('vrf_bind','Vlan1',vrf_name[0]) + result = vrf_api.verify_vrf_verbose(data.dut1, vrfname = vrf_name[0], interface = [data.d1_dut_ports[0]]) + if result is False: + st.report_fail('vrf_bind',data.d1_dut_ports[0],vrf_name[0]) + result = ip_api.verify_interface_ip_address(data.dut1, data.d1_dut_ports[0],dut1_dut2_vrf_ip[0]+'/24', vrfname = vrf_name[0]) + if result is False: + st.report_fail('vrf_bind',data.d1_dut_ports[0],vrf_name[0]) + result = ip_api.verify_interface_ip_address(data.dut1, data.d1_dut_ports[0],dut1_dut2_vrf_ipv6[0]+'/64', vrfname = vrf_name[0]) + if result is False: + st.report_fail('vrf_bind',data.d1_dut_ports[0],vrf_name[0]) + return result + if pc != '': + result = vrf_api.verify_vrf_verbose(data.dut1, vrfname = vrf_name[2], interface = ['Vlan3']) + if result is False: + st.report_fail('vrf_bind','Vlan3',vrf_name[2]) + result = vrf_api.verify_vrf_verbose(data.dut1, vrfname = vrf_name[2], interface = ['PortChannel10']) + if result is False: + st.report_fail('vrf_bind','PortChannel10',vrf_name[2]) + result = ip_api.verify_interface_ip_address(data.dut1, 'PortChannel10',dut1_dut2_vrf_ip[0]+'/24', vrfname = vrf_name[2]) + if result is False: + 
st.report_fail('vrf_bind','PortChannel10',vrf_name[2]) + result = ip_api.verify_interface_ip_address(data.dut1, 'PortChannel10',dut1_dut2_vrf_ipv6[0]+'/64', vrfname = vrf_name[2]) + if result is False: + st.report_fail('vrf_bind','PortChannel10',vrf_name[2]) + return result + if ve != '': + result = ip_api.verify_interface_ip_address(data.dut1, 'Vlan2', dut1_tg1_vrf_ipv6[1]+'/64', vrfname = vrf_name[1], family = 'ipv6') + if result is False: + st.report_fail('vrf_bind','Vlan2',vrf_name[1]) + result = vrf_api.verify_vrf_verbose(data.dut1, vrfname = vrf_name[1], interface = 'Vlan101') + if result is False: + st.report_fail('vrf_bind','Vlan101',vrf_name[1]) + result = ip_api.verify_interface_ip_address(data.dut1, 'Vlan101',dut1_dut2_vrf_ip[0]+'/24', vrfname = vrf_name[1]) + if result is False: + st.report_fail('vrf_bind','Vlan101',vrf_name[1]) + result = ip_api.verify_interface_ip_address(data.dut1, 'Vlan102',dut1_dut2_vrf_ip[0]+'/24', vrfname = vrf_name[1]) + if result is False: + st.report_fail('vrf_bind','Vlan102',vrf_name[1]) + return result + +def dut_vrf_bgp(**kwargs): + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = kwargs['pc'] + else: + pc = '' + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + if phy != '' and config == '': + st.log('######------Configure BGP in vrf--######') + dict1 = {'vrf_name':vrf_name[0],'router_id':dut1_router_id,'local_as':dut1_as[0],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[0],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[0],'router_id':dut2_router_id,'local_as':dut2_as[0],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[0],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = 
{'vrf_name':vrf_name[0],'local_as':dut1_as[0],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[0],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[0],'local_as':dut2_as[0],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[0],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + st.log('######------Configure BGPv4+ in vrf ------######') + dict1 = {'vrf_name':vrf_name[0],'router_id':dut1_router_id,'local_as':dut1_as[0],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[0],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[0],'router_id':dut2_router_id,'local_as':dut2_as[0],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[0],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[0],'local_as':dut1_as[0],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[0],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[0],'local_as':dut2_as[0],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[0],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[0], local_as = dut1_as[0], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[0]) + elif phy != '' and config == 'no': + dict1 = {'vrf_name':vrf_name[0],'local_as':dut1_as[0],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + dict2 = {'vrf_name':vrf_name[0],'local_as':dut2_as[0],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + if ve != '' and config 
== '': + st.log('######------Configure BGP in vrf--######') + dict1 = {'vrf_name':vrf_name[1],'router_id':dut1_router_id,'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'router_id':dut2_router_id,'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[1],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[1],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + st.log('######------Configure BGPv4+ in vrf ------######') + dict1 = {'vrf_name':vrf_name[1],'router_id':dut1_router_id,'local_as':dut1_as[1],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[1],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[1],'router_id':dut2_router_id,'local_as':dut2_as[1],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[1],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[1],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[1],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[1], local_as = dut1_as[1], addr_family 
='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[0]) + elif ve != '' and config == 'no': + dict1 = {'vrf_name':vrf_name[1],'local_as':dut1_as[1],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + dict2 = {'vrf_name':vrf_name[1],'local_as':dut2_as[1],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + if pc != '' and config == '': + st.log('######------Configure BGP in vrf--######') + dict1 = {'vrf_name':vrf_name[2],'router_id':dut1_router_id,'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[2],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[2],'router_id':dut2_router_id,'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[2],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = {'vrf_name':vrf_name[2],'local_as':dut1_as[2],'neighbor':dut2_dut1_vrf_ip[0],'remote_as':dut2_as[2],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[2],'local_as':dut2_as[2],'neighbor':dut1_dut2_vrf_ip[0],'remote_as':dut1_as[2],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + st.log('######------Configure BGPv4+ in vrf ------######') + dict1 = {'vrf_name':vrf_name[2],'router_id':dut1_router_id,'local_as':dut1_as[2],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[2],'config_type_list':['neighbor']} + dict2 = {'vrf_name':vrf_name[2],'router_id':dut2_router_id,'local_as':dut2_as[2],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[2],'config_type_list':['neighbor']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + dict1 = 
{'vrf_name':vrf_name[2],'local_as':dut1_as[2],'addr_family':'ipv6','neighbor':dut2_dut1_vrf_ipv6[0],'remote_as':dut2_as[2],'config_type_list':['activate','nexthop_self']} + dict2 = {'vrf_name':vrf_name[2],'local_as':dut2_as[2],'addr_family':'ipv6','neighbor':dut1_dut2_vrf_ipv6[0],'remote_as':dut1_as[2],'config_type_list':['activate','nexthop_self']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + + bgp_api.config_bgp(dut = data.dut2, vrf_name = vrf_name[2], local_as = dut1_as[2], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=dut1_dut2_vrf_ipv6[0]) + elif pc != '' and config == 'no': + dict1 = {'vrf_name':vrf_name[2],'local_as':dut1_as[2],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + dict2 = {'vrf_name':vrf_name[2],'local_as':dut2_as[2],'config':'no','removeBGP':'yes','config_type_list':['removeBGP']} + parallel.exec_parallel(True, [data.dut1, data.dut2], bgp_api.config_bgp, [dict1, dict2]) + +def verify_bgp(**kwargs): + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = kwargs['pc'] + else: + pc = '' + if 'ip' in kwargs: + ip = kwargs['ip'] + + if ip == 'ipv4': + if phy != '': + result = bgp_api.verify_bgp_summary(data.dut1, family='ipv4', neighbor = dut2_dut1_vrf_ip[0], state='Established',vrf = vrf_name[0]) + return result + if ve != '': + result = bgp_api.verify_bgp_summary(data.dut1, family='ipv4', neighbor = dut2_dut1_vrf_ip[0], state='Established',vrf = vrf_name[1]) + return result + if pc != '': + result = bgp_api.verify_bgp_summary(data.dut1, family='ipv4', neighbor = dut2_dut1_vrf_ip[0], state='Established',vrf = vrf_name[2]) + return result + else: + if phy != '': + result = bgp_api.verify_bgp_summary(data.dut1, family='ipv6', neighbor = dut2_dut1_vrf_ipv6[0], state='Established',vrf = vrf_name[0]) + return result + if ve != '': + result = 
bgp_api.verify_bgp_summary(data.dut1, family='ipv6', neighbor = dut2_dut1_vrf_ipv6[0], state='Established',vrf = vrf_name[1]) + return result + if pc != '': + result = bgp_api.verify_bgp_summary(data.dut1, family='ipv6', neighbor = dut2_dut1_vrf_ipv6[0], state='Established',vrf = vrf_name[2]) + return result + +def tg_vrf_bgp(**kwargs): + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = kwargs['pc'] + else: + pc = '' + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + + if phy != '' and config == '': + st.log('######------Configure BGP in vrf - 101 for TG interface------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0], router_id = dut1_router_id, local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ip[0], remote_as = dut1_tg_as, config = 'yes', config_type_list =['neighbor','activate']) + st.log('######------Configure BGPv4+ in vrf - 101 for TG interface ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0], router_id = dut1_router_id, addr_family ='ipv6', local_as = dut1_as[0], neighbor = tg1_dut1_vrf_ipv6[0], remote_as = dut1_tg_as, config = 'yes', config_type_list =['neighbor', 'activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[0], local_as = dut1_as[0], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=tg1_dut1_vrf_ipv6[0]) + elif phy != '' and config == 'no': + bgp_api.config_bgp(dut = data.dut1, local_as = dut1_as[0], vrf_name = vrf_name[0] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + + if ve != '' and config == '': + st.log('######------Configure BGP in vrf -102 for TG interface------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[1], router_id = dut1_router_id, local_as = dut1_as[1], neighbor = tg1_dut1_vrf_ip[1], remote_as = dut1_tg_as, config = 'yes', config_type_list 
=['neighbor','activate']) + st.log('######------Configure BGPv4+ in vrf-102 for TG interface ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[1], router_id = dut1_router_id, addr_family ='ipv6', local_as = dut1_as[1], neighbor = tg1_dut1_vrf_ipv6[1], remote_as = dut1_tg_as, config = 'yes', config_type_list =['neighbor','activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[1], local_as = dut1_as[1], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=tg1_dut1_vrf_ipv6[1]) + elif ve != '' and config == 'no': + bgp_api.config_bgp(dut = data.dut1, local_as = dut1_as[1], vrf_name = vrf_name[1] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + + if pc != '' and config == '': + st.log('######------Configure BGP in vrf - 103 for TG interface------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], router_id = dut1_router_id, local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ip[2], remote_as = dut1_tg_as, config = 'yes', config_type_list =['neighbor','activate']) + st.log('######------Configure BGPv4+ in vrf - 103 for TG interface ------######') + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], router_id = dut1_router_id, addr_family ='ipv6', local_as = dut1_as[2], neighbor = tg1_dut1_vrf_ipv6[2], remote_as = dut1_tg_as, config = 'yes', config_type_list =['neighbor','activate']) + bgp_api.config_bgp(dut = data.dut1, vrf_name = vrf_name[2], local_as = dut1_as[2], addr_family ='ipv6', config_type_list=["routeMap"], routeMap='UseGlobal', diRection='in', neighbor=tg1_dut1_vrf_ipv6[2]) + elif pc != '' and config == 'no': + bgp_api.config_bgp(dut = data.dut1, local_as = dut1_as[2], vrf_name = vrf_name[2] ,config = 'no', removeBGP = 'yes', config_type_list = ["removeBGP"]) + +def mutliple_hosts(**kwargs): + if 'dut' in kwargs: + dut = kwargs['dut'] + if 'vlan' in kwargs: + vlan = kwargs['vlan'] + if 'ip' in kwargs: + ip = kwargs['ip'] + + if dut 
== data.dut1: + if ip == 'ipv4': + if vlan == '1': + intf_ip = tg1_dut1_vrf_ip[0] + gatway = dut1_tg1_vrf_ip[0] + if vlan == '2': + intf_ip = tg1_dut1_vrf_ip[1] + gatway = dut1_tg1_vrf_ip[1] + if vlan == '3': + intf_ip = tg1_dut1_vrf_ip[2] + gatway = dut1_tg1_vrf_ip[2] + st.log('######------On DUT1 create ipv4 hosts on vlan '+vlan+' -----######') + # intf_hand_v4 = data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, mode='config', intf_ip_addr = intf_ip, gateway = gatway, count = dut1_hosts, gateway_step ='0.0.0.0', netmask = '255.255.255.0', vlan = '1', vlan_id = vlan, intf_ip_addr_step = '0.0.0.1', arp_send_req = '1', vlan_id_step='0') + intf_hand_v4 = data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, mode='config', intf_ip_addr = intf_ip, gateway = gatway, netmask = '255.255.255.0', vlan = '1', vlan_id = vlan, intf_ip_addr_step = '0.0.0.1', arp_send_req = '1') + data.d1_p1_intf_v4.update({vlan:intf_hand_v4}) + else: + if vlan == '1': + intf_ip = tg1_dut1_vrf_ipv6[0] + gatway = dut1_tg1_vrf_ipv6[0] + if vlan == '2': + intf_ip = tg1_dut1_vrf_ipv6[1] + gatway = dut1_tg1_vrf_ipv6[1] + if vlan == '3': + intf_ip = tg1_dut1_vrf_ipv6[2] + gatway = dut1_tg1_vrf_ipv6[2] + st.log('######------On DUT1 create ipv6 hosts on vlan '+vlan+' -----######') + # intf_hand_v6 = data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, mode = 'config', ipv6_intf_addr = intf_ip, ipv6_prefix_length = '64', ipv6_gateway=gatway, arp_send_req='1', vlan = '1', vlan_id = vlan, ipv6_intf_addr_step = '::1', count = dut1_hosts, vlan_id_step='0') + intf_hand_v6 = data.tg1.tg_interface_config(port_handle = data.tg_dut1_p1, mode = 'config', ipv6_intf_addr = intf_ip, ipv6_prefix_length = '64', ipv6_gateway=gatway, arp_send_req='1', vlan = '1', vlan_id = vlan) + data.d1_p1_intf_v6.update({vlan:intf_hand_v6}) + else: + if ip == 'ipv4': + if vlan == '6': + intf_ip = tg1_dut2_vrf_ip[0] + gatway = dut2_tg1_vrf_ip[0] + if vlan == '7': + intf_ip = tg1_dut2_vrf_ip[1] + gatway = 
dut2_tg1_vrf_ip[1] + if vlan == '8': + intf_ip = tg1_dut2_vrf_ip[2] + gatway = dut2_tg1_vrf_ip[2] + st.log('######------On DUT2 create ipv4 hosts on vlan '+vlan+' -----######') + # intf_hand_v4 = data.tg2.tg_interface_config(port_handle = data.tg_dut2_p1, mode='config', intf_ip_addr = intf_ip, gateway = gatway, count = dut2_hosts, gateway_step ='0.0.0.0', netmask = '255.255.255.0', vlan = '1', vlan_id = vlan, intf_ip_addr_step = '0.0.0.1', arp_send_req = '1', vlan_id_step='0') + intf_hand_v4 = data.tg2.tg_interface_config(port_handle = data.tg_dut2_p1, mode='config', intf_ip_addr = intf_ip, gateway = gatway, netmask = '255.255.255.0', vlan = '1', vlan_id = vlan, arp_send_req = '1') + data.d2_p1_intf_v4.update({vlan:intf_hand_v4}) + else: + if vlan == '6': + intf_ip = tg1_dut2_vrf_ipv6[0] + gatway = dut2_tg1_vrf_ipv6[0] + if vlan == '7': + intf_ip = tg1_dut2_vrf_ipv6[1] + gatway = dut2_tg1_vrf_ipv6[1] + if vlan == '8': + intf_ip = tg1_dut2_vrf_ipv6[2] + gatway = dut2_tg1_vrf_ipv6[2] + st.log('######------On DUT2 create ipv6 hosts on vlan '+vlan+' -----######') + # intf_hand_v6 = data.tg2.tg_interface_config(port_handle = data.tg_dut2_p1, mode = 'config', ipv6_intf_addr = intf_ip, ipv6_prefix_length = '64', ipv6_gateway=gatway, arp_send_req='1', vlan = '1', vlan_id = vlan, ipv6_intf_addr_step = '::1', count = dut2_hosts, vlan_id_step='0') + intf_hand_v6 = data.tg2.tg_interface_config(port_handle = data.tg_dut2_p1, mode = 'config', ipv6_intf_addr = intf_ip, ipv6_prefix_length = '64', ipv6_gateway=gatway, arp_send_req='1', vlan = '1', vlan_id = vlan) + data.d2_p1_intf_v6.update({vlan:intf_hand_v6}) + +def start_arp_nd(): + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v4.get('1')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v4.get('2')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v4.get('3')['handle'], arp_target='all') + + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v6.get('1')['handle'], 
arp_target='all') + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v6.get('2')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d1_p1_intf_v6.get('3')['handle'], arp_target='all') + + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v4.get('6')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v4.get('7')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v4.get('8')['handle'], arp_target='all') + + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v6.get('6')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v6.get('7')['handle'], arp_target='all') + data.tg1.tg_arp_control(handle=data.d2_p1_intf_v6.get('8')['handle'], arp_target='all') + +def pump_bgp_routes(**kwargs): + if 'dut' in kwargs: + dut = kwargs['dut'] + if 'ip' in kwargs: + ip = kwargs['ip'] + if 'vlan' in kwargs: + vlan = kwargs['vlan'] + if dut == data.dut1: + if ip == 'ipv4': + if vlan == '1': + remote_as = dut1_as[0] + gatway = dut1_tg1_vrf_ip[0] + if vlan == '2': + remote_as = dut1_as[1] + gatway = dut1_tg1_vrf_ip[1] + if vlan == '3': + remote_as = dut1_as[2] + gatway = dut1_tg1_vrf_ip[2] + st.log('######------On DUT1 send IPv4 BGP routes from Tgen -----######') + d1_p1_v4_config = {'mode':'enable', 'active_connect_enable':'1', 'local_as':'300', 'remote_as':remote_as, 'remote_ip_addr':gatway} + d1_p1_v4_route = {'mode':'add','num_routes':tg_dut1_p1_v4_routes, 'as_path':'as_seq:1', 'prefix':tg_dut1_p1_v4_prefix} + d1_p1_v4_start = {'mode':'start'} + intf_handle = data.d1_p1_intf_v4.get(vlan) + # bgp_router = tg_bgp_config(tg = data.tg1, handle = intf_handle['handle'][0], conf_var = d1_p1_v4_config, route_var = d1_p1_v4_route, ctrl_var = d1_p1_v4_start) + bgp_router = tg_bgp_config(tg = data.tg1, handle = intf_handle['handle'], conf_var = d1_p1_v4_config, route_var = d1_p1_v4_route, ctrl_var = d1_p1_v4_start) + st.log("DUT1 PORT1 BGP_HANDLE: "+str(bgp_router)) + + # bgp_router = tg_bgp_config(tg = 
data.tg1, port_handle = data.tg_dut1_p1, mode = 'enable', ip_version = 4, intf_ip_addr = tg1_dut1_vrf_ip[0],gateway = dut1_tg1_vrf_ip[0], local_as = 300, remote_as = dut1_as[0], next_hop_ip = dut1_tg1_vrf_ip[0]) + + data.d1_p1_bgp_v4.update({vlan:bgp_router}) + else: + if vlan == '1': + remote_as = dut1_as[0] + gatway = dut1_tg1_vrf_ipv6[0] + if vlan == '2': + remote_as = dut1_as[1] + gatway = dut1_tg1_vrf_ipv6[1] + if vlan == '3': + remote_as = dut1_as[2] + gatway = dut1_tg1_vrf_ipv6[2] + st.log('######------On DUT1 send IPv6 BGP routes from Tgen -----######') + d1_p1_v6_config = {'mode':'enable', 'ip_version':'6', 'active_connect_enable':'1', 'local_as':'300', 'remote_as':remote_as, 'remote_ipv6_addr':gatway} + d1_p1_v6_route = {'mode':'add', 'ip_version':'6', 'num_routes':tg_dut1_p1_v6_routes, 'as_path':'as_seq:1', 'prefix':tg_dut1_p1_v6_prefix} + d1_p1_v6_start = {'mode':'start'} + intf_handle = data.d1_p1_intf_v6.get(vlan) + #bgp_router = tg_bgp_config(tg = data.tg1, handle = intf_handle['handle'][0], conf_var = d1_p1_v6_config, route_var = d1_p1_v6_route, ctrl_var = d1_p1_v6_start) + bgp_router = tg_bgp_config(tg = data.tg1, handle = intf_handle['handle'], conf_var = d1_p1_v6_config, route_var = d1_p1_v6_route, ctrl_var = d1_p1_v6_start) + data.d1_p1_bgp_v6.update({vlan:bgp_router}) + +def create_streams_all_vrf(**kwargs): + if 'todut' in kwargs: + todut = kwargs['todut'] + + if todut == data.dut1: + intf_handle1 = data.d2_p1_intf_v4.get('6') + bgp_handle1 = data.d1_p1_bgp_v4.get('1') + + intf_handle2 = data.d2_p1_intf_v4.get('7') + bgp_handle2 = data.d1_p1_bgp_v4.get('2') + + intf_handle3 = data.d2_p1_intf_v4.get('8') + bgp_handle3 = data.d1_p1_bgp_v4.get('3') + + # tc1 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle1['handle'][0], emulation_dst_handle = bgp_handle1['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = 
tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'phy_v4_stream':tc1['stream_id']}) + + # tc2 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle2['handle'][0], emulation_dst_handle = bgp_handle2['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'ve_v4_stream':tc2['stream_id']}) + + # tc3 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle3['handle'][0], emulation_dst_handle = bgp_handle3['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'pc_v4_stream':tc3['stream_id']}) + + tc1 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle1['handle'], emulation_dst_handle = bgp_handle1['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + data.stream_list.update({'phy_v4_stream':tc1['stream_id']}) + + tc2 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle2['handle'], emulation_dst_handle = bgp_handle2['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + data.stream_list.update({'ve_v4_stream':tc2['stream_id']}) + + tc3 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle3['handle'], emulation_dst_handle = bgp_handle3['route'][0]['handle'], circuit_endpoint_type = 'ipv4', mode = 'create', 
transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + data.stream_list.update({'pc_v4_stream':tc3['stream_id']}) + + intf_handle4 = data.d2_p1_intf_v6.get('6') + bgp_handle4 = data.d1_p1_bgp_v6.get('1') + + intf_handle5 = data.d2_p1_intf_v6.get('7') + bgp_handle5 = data.d1_p1_bgp_v6.get('2') + + intf_handle6 = data.d2_p1_intf_v6.get('8') + bgp_handle6 = data.d1_p1_bgp_v6.get('3') + + # tc4 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle4['handle'][0], emulation_dst_handle = bgp_handle4['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'phy_v6_stream':tc4['stream_id']}) + + # tc5 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle5['handle'][0], emulation_dst_handle = bgp_handle5['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'ve_v6_stream':tc5['stream_id']}) + + # tc6 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, emulation_src_handle = intf_handle6['handle'][0], emulation_dst_handle = bgp_handle6['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + # data.stream_list.update({'pc_v6_stream':tc6['stream_id']}) + + tc4 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle4['handle'], emulation_dst_handle = bgp_handle4['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = 
data.tg_dut1_p1) + data.stream_list.update({'phy_v6_stream':tc4['stream_id']}) + + tc5 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle5['handle'], emulation_dst_handle = bgp_handle5['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + data.stream_list.update({'ve_v6_stream':tc5['stream_id']}) + + tc6 = data.tg2.tg_traffic_config(port_handle = data.tg_dut2_p1, duration = '2', emulation_src_handle = intf_handle6['handle'], emulation_dst_handle = bgp_handle6['route'][0]['handle'], circuit_endpoint_type = 'ipv6', mode = 'create', transmit_mode = 'continuous', length_mode = 'fixed', rate_pps = tg_dut1_rate_pps, port_handle2 = data.tg_dut1_p1) + data.stream_list.update({'pc_v6_stream':tc6['stream_id']}) + return + +def verify_traffic_all_vrfs(**kwargs): + if 'stream_id' in kwargs: + stream_id = kwargs['stream_id'] + else: + stream_id = 'all' + + st.log('######------Clear stats before the start of the traffic -----######') + #data.tg2.tg_traffic_control(action = 'stop', port_handle = data.tg_dut2_p1) + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.values()) + data.tg1.tg_traffic_control(action = 'clear_stats',port_handle = data.tg_dut1_p1) + data.tg1.tg_traffic_control(action = 'clear_stats',port_handle = data.tg_dut2_p1) + + #data.tg2.tg_traffic_control(action = 'run', port_handle = data.tg_dut2_p1, duration = '2') #send continuous traffic for 5 seconds + data.tg2.tg_traffic_control(action = 'run', stream_handle = data.stream_list.values(), duration = '2') #send continuous traffic for 5 seconds + #data.tg2.tg_traffic_control(action = 'stop', port_handle = data.tg_dut2_p1) # stop the traffic + data.tg2.tg_traffic_control(action = 'stop', stream_handle = data.stream_list.values()) # stop the traffic + + if stream_id == 'all': + # traffic_details = 
{'1': {'tx_ports' : [vars.T1D2P1],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [vars.T1D1P1],'rx_obj' : [data.tg1],'stream_list' : [list(data.stream_list.values())]}} + traffic_details = {'1': {'tx_ports' : [data.tg_dut2_hw_port],'tx_obj' : [data.tg2],'exp_ratio' : [1],'rx_ports' : [data.tg_dut1_hw_port],'rx_obj' : [data.tg1],'stream_list' : [data.stream_list.values()]}} + aggrResult = validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', comp_type='packet_count') + return aggrResult + +def clear_tg(**kwargs): + data.tg1.tg_traffic_control(action = 'clear_stats',port_handle = data.tg_dut1_p1) + data.tg1.tg_traffic_control(action = 'clear_stats',port_handle = data.tg_dut2_p1) + +def config_route_map(dut,rmap,type='next_hop_v6',config='yes'): + if config == 'yes': + st.vtysh_config(dut,"route-map %s permit 10"%rmap) + if type=='local_pref': + st.vtysh_config(dut,"set local-preference 40") + elif type == 'next_hop_v6': + st.log('######------Configure route-map to prefer global v6 address over link local address as next hop ------######') + st.vtysh_config(dut, "set ipv6 next-hop prefer-global") + st.vtysh(dut,'end') + st.vtysh(dut,'exit') + else: + st.vtysh_config(dut,"no route-map %s permit 10"% rmap) + st.vtysh(dut,'end') + st.vtysh(dut,'exit') + +def retry_api(func,**kwargs): + retry_count = kwargs.get("retry_count", 10) + delay = kwargs.get("delay", 3) + if 'retry_count' in kwargs: del kwargs['retry_count'] + if 'delay' in kwargs: del kwargs['delay'] + for i in range(retry_count): + st.log("Attempt %s of %s" %((i+1),retry_count)) + if func(**kwargs): + return True + if retry_count != (i+1): + st.log("waiting for %s seconds before retyring again"%delay) + st.wait(delay) + return False + +def tg_interfaces(**kwargs): + if 'config' in kwargs: + config = kwargs['config'] + else: + config = '' + if 'phy' in kwargs: + phy = kwargs['phy'] + else: + phy = '' + if 've' in kwargs: + ve = kwargs['ve'] + else: + ve = '' + if 'pc' in kwargs: + pc = 
kwargs['pc'] + else: + pc = '' + + if config == '' and phy != '': + st.log('######------Push multiple hosts on DUT1 on vlan 1 and vlan 6------######') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[0], ip = 'ipv4') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[0], ip = 'ipv4') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[0], ip = 'ipv6') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[0], ip = 'ipv6') + + if config == '' and ve != '': + st.log('######------Push multiple hosts on DUT1 on vlan 2 and vlan 7------######') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[1], ip = 'ipv4') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[1], ip = 'ipv4') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[1], ip = 'ipv6') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[1], ip = 'ipv6') + + if config == '' and pc != '': + st.log('######------Push multiple hosts on DUT1 on vlan 3 and vlan 8------######') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[2], ip = 'ipv4') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[2], ip = 'ipv4') + mutliple_hosts(dut = data.dut1, vlan = dut1_tg1_vlan[2], ip = 'ipv6') + mutliple_hosts(dut = data.dut2, vlan = dut2_tg1_vlan[2], ip = 'ipv6') + + if config == 'no' and phy != '': + st.log('######------Removes all the traffic streams configured on the port.------######') + data.tg1.tg_traffic_control(action = 'reset', port_handle = data.tg_dut1_p1) + data.tg2.tg_traffic_control(action = 'reset', port_handle = data.tg_dut2_p1) diff --git a/spytest/tests/routing/VRF/vrf_vars.py b/spytest/tests/routing/VRF/vrf_vars.py new file mode 100644 index 00000000000..37a08f50ff9 --- /dev/null +++ b/spytest/tests/routing/VRF/vrf_vars.py @@ -0,0 +1,114 @@ +from spytest.dicts import SpyTestDict + +data = SpyTestDict() +data.test_var = 'testing' +data.mac_aging = 10 + +vrf_name = ['Vrf-'+'%s'%x for x in range (101,105)] +vrf_name_scale = ['Vrf-'+'%s'%x for x in range (1,1000)] +#Vlan 
+dut1_dut2_vlan = ['%s'%x for x in range (101,105)] +dut2_dut1_vlan = ['%s'%x for x in range (101,105)] + +dut1_dut2_vlan_scale = ['%s'%x for x in range (1,1000)] + +dut1_tg1_vlan = ['%s'%x for x in range (1,4)] +dut2_tg1_vlan = ['%s'%x for x in range (6,9)] + +#Loopback +dut1_loopback = ['Loopback101', 'Loopback102', 'Loopback103'] +dut2_loopback = ['Loopback101', 'Loopback102', 'Loopback103'] + +dut1_loopback_ip = ['50.0.%s.1'%x for x in range (1,4)] +dut2_loopback_ip = ['60.0.%s.1'%x for x in range (1,4)] +dut1_loopback_ip_subnet = '32' +dut2_loopback_ip_subnet = '32' + +dut1_loopback_ipv6 = ['500%s::1'%x for x in range (1,4)] +dut2_loopback_ipv6 = ['600%s::1'%x for x in range (1,4)] +dut1_loopback_ipv6_subnet = '128' +dut2_loopback_ipv6_subnet = '128' + +#DUT and TG IPs +dut1_dut2_vrf_ip = ['3.0.%s.1'%x for x in range(101,105)] +dut2_dut1_vrf_ip = ['3.0.%s.2'%x for x in range(101,105)] +dut1_dut2_vrf_ip_subnet = '24' +dut2_dut1_vrf_ip_subnet = '24' + +dut1_dut2_vrf_ipv6 = ['3%s::1'%x for x in range(101,201)] +dut2_dut1_vrf_ipv6 = ['3%s::2'%x for x in range(101,201)] +dut1_dut2_vrf_ipv6_subnet = '64' +dut2_dut1_vrf_ipv6_subnet = '64' + +dut1_tg1_vrf_ip = ['1.0.%s.1'%x for x in range (1,4)] +tg1_dut1_vrf_ip = ['1.0.%s.2'%x for x in range (1,4)] +dut1_tg1_vrf_ip_subnet = '24' +tg1_dut1_vrf_ip_subnet = '24' + +dut1_tg1_vrf_ipv6 = ['201%s::1'%x for x in range (1,4)] +tg1_dut1_vrf_ipv6 = ['201%s::2'%x for x in range (1,4)] +dut1_tg1_vrf_ipv6_subnet = '64' +tg1_dut1_vrf_ipv6_subnet = '64' + +dut2_tg1_vrf_ip = ['6.0.%s.1'%x for x in range (6,9)] +tg1_dut2_vrf_ip = ['6.0.%s.2'%x for x in range (6,9)] +dut2_tg1_vrf_ip_subnet = '24' +tg1_dut2_vrf_ip_subnet = '24' + +dut2_tg1_vrf_ipv6 = ['202%s::1'%x for x in range (6,9)] +tg1_dut2_vrf_ipv6 = ['202%s::2'%x for x in range (6,9)] +dut2_tg1_vrf_ipv6_subnet = '64' +tg1_dut2_vrf_ipv6_subnet = '64' + +#BGP dut1 parameter +dut1_as = ['%s'%x for x in range (101,201)] +#dut1_as = ['101','101','101'] + +dut1_router_id = 
'11.11.11.11' +dut1_keepalive = '60' +dut1_holddown = '180' +dut1_ip_peergroup = 'ip_peergroup' +dut1_ipv6_peergroup = 'ipv6_peergroup' + +#BGP dut2 parameters +dut2_as = ['%s'%x for x in range (101,201)] +#dut2_as = ['101','101','101'] + +dut2_router_id = '22.22.22.22' +dut2_keepalive = '60' +dut2_holddown = '180' +dut2_ip_peergroup = 'ip_peergroup' +dut2_ipv6_peergroup = 'ipv6_peergroup' + +#BGP TG parameters +dut1_tg_as = '300' +dut2_tg_as = '300' + +#DUT1 traffic parameters +dut1_hosts = '10' +tg_dut1_rate_pps = '2000' +tg_dut1_p1_v4_routes = '50' +tg_dut1_p1_v4_prefix = '10.0.0.1' +tg_dut1_p1_v6_routes = '50' +tg_dut1_p1_v6_prefix = '2110::1' + +#DUT2 traffic parameters +dut2_hosts = '10' +tg_dut2_rate_pps = '2000' +tg_dut2_p1_v4_routes = '50' +tg_dut2_p1_v4_route_prefix = '11.0.0.1' +tg_dut2_p1_v6_routes = '50' +tg_dut2_p1_v6_route_prefix = '2111::1' +# +############################################################################## +############################################################################### + +vrf_list = ['Vrf-'+'%s'%x for x in range (1,1000)] +dut1_dut2_vlan_list = ['%s'%x for x in range (1,1000)] +dut1_as_scale = ['%s'%x for x in range (1,100)] +dut2_as_scale = ['%s'%x for x in range (101,199)] +dut1_loopback_scale = ['Loopback-'+'%s'%x for x in range (1,100)] +dut2_loopback_scale = ['Loopback-'+'%s'%x for x in range (1,100)] + +wait_time_vrf = 400 #400 #130 for 400 intf , 350 for full scl +wait_time_vlan = 2300 #2300 # 2220 for 1000 intf diff --git a/spytest/tests/routing/test_ip.py b/spytest/tests/routing/test_ip.py index 9a57ea00ce8..88426cd3257 100644 --- a/spytest/tests/routing/test_ip.py +++ b/spytest/tests/routing/test_ip.py @@ -17,7 +17,6 @@ import apis.routing.route_map as rmap_obj import apis.switching.mac as mac_obj import apis.routing.arp as arp_obj -import apis.common.asic_bcm as asicbcm_obj vars = dict() data = SpyTestDict() @@ -65,7 +64,7 @@ def ip_module_hooks(request): global vars, tg_handler, tg # Min topology 
verification st.log("Ensuring minimum topology") - vars = st.ensure_min_topology("D1T1:2", "D2T1:2", "D1D2:4") + vars = st.ensure_min_topology("D1T1:4", "D2T1:2", "D1D2:4") # Initialize TG and TG port handlers tg_handler = tgapi.get_handles_byname("T1D1P1", "T1D1P2", "T1D2P1", "T1D2P2") tg = tg_handler["tg"] @@ -643,21 +642,25 @@ def test_ft_verify_interfaces_order(): @pytest.fixture(scope="function") def ceta_31902_fixture(request,ip_module_hooks): + ipfeature.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.ip6_addr[1], + 96, family=data.af_ipv6,config="remove") vlan_obj.create_vlan(vars.D1, [data.host1_vlan,data.host2_vlan]) - vlan_obj.add_vlan_member(vars.D1, data.host1_vlan, [vars.D1T1P3,vars.D1T1P4], tagging_mode=True) - vlan_obj.add_vlan_member(vars.D1, data.host2_vlan, [vars.D1T1P3, vars.D1T1P4], tagging_mode=True) + vlan_obj.add_vlan_member(vars.D1, data.host1_vlan, [vars.D1T1P1,vars.D1T1P2], tagging_mode=True) + vlan_obj.add_vlan_member(vars.D1, data.host2_vlan, [vars.D1T1P1, vars.D1T1P2], tagging_mode=True) ipfeature.config_ip_addr_interface(vars.D1, "Vlan"+data.host1_vlan, data.vlan1_ip, 24, family=data.af_ipv4) ipfeature.config_ip_addr_interface(vars.D1, "Vlan" + data.host2_vlan, data.vlan2_ip, 24, family=data.af_ipv4) yield ipfeature.delete_ip_interface(vars.D1, "Vlan"+data.host1_vlan, data.vlan1_ip, "24", family="ipv4") ipfeature.delete_ip_interface(vars.D1, "Vlan"+data.host2_vlan, data.vlan2_ip, "24", family="ipv4") - vlan_obj.delete_vlan_member(vars.D1, data.host1_vlan, [vars.D1T1P3, vars.D1T1P4], True) - vlan_obj.delete_vlan_member(vars.D1, data.host2_vlan, [vars.D1T1P3, vars.D1T1P4], True) + vlan_obj.delete_vlan_member(vars.D1, data.host1_vlan, [vars.D1T1P1, vars.D1T1P2], True) + vlan_obj.delete_vlan_member(vars.D1, data.host2_vlan, [vars.D1T1P1, vars.D1T1P2], True) vlan_obj.delete_vlan(vars.D1, [data.host1_vlan,data.host2_vlan]) + ipfeature.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.ip6_addr[1], 96, family=data.af_ipv6) + def 
test_Ceta_31902(ceta_31902_fixture): success=True - mac_obj.config_mac(dut=vars.D1,mac=data.host1_mac, vlan=data.host1_vlan, intf=vars.D1T1P3) + mac_obj.config_mac(dut=vars.D1,mac=data.host1_mac, vlan=data.host1_vlan, intf=vars.D1T1P1) arp_obj.add_static_arp(dut=vars.D1, ipaddress=data_tg_ip, macaddress=data.host1_mac, interface="Vlan"+data.host1_vlan) @@ -669,14 +672,14 @@ def test_Ceta_31902(ceta_31902_fixture): interface="Vlan"+data.host1_vlan) arp_obj.delete_static_arp(dut=vars.D1, ipaddress=data_tg_ip, interface="Vlan"+data.host1_vlan, mac=data.host2_mac) - mac_obj.config_mac(dut=vars.D1,mac=data.host2_mac, vlan=data.host1_vlan, intf=vars.D1T1P4) + mac_obj.config_mac(dut=vars.D1,mac=data.host2_mac, vlan=data.host1_vlan, intf=vars.D1T1P2) arp_obj.add_static_arp(dut=vars.D1, ipaddress=data_tg_ip, macaddress=data.host2_mac, interface="Vlan"+data.host1_vlan) mac_obj.delete_mac(dut=vars.D1, mac=data.host1_mac, vlan=data.host1_vlan) - mac_obj.config_mac(dut=vars.D1,mac=data.host1_mac, vlan=data.host1_vlan, intf=vars.D1T1P3) - l2_out=asicbcm_obj.get_param_from_bcmcmd_output(vars.D1,'l2 show',["gport"],{"mac" : data.host2_mac}) - l3_out= asicbcm_obj.get_param_from_bcmcmd_output(vars.D1,'l3 egress show',["port"],{"mac" : data.host2_mac}) + mac_obj.config_mac(dut=vars.D1,mac=data.host1_mac, vlan=data.host1_vlan, intf=vars.D1T1P1) + l2_out = asicapi.get_l2_out(vars.D1, data.host2_mac) + l3_out = asicapi.get_l3_out(vars.D1, data.host2_mac) if l2_out and l3_out: l2_gport=l2_out[0]["gport"][9] l3_port=l3_out[0]["port"] diff --git a/spytest/tests/routing/test_l3_performance.py b/spytest/tests/routing/test_l3_performance.py new file mode 100644 index 00000000000..9eb36c511d3 --- /dev/null +++ b/spytest/tests/routing/test_l3_performance.py @@ -0,0 +1,754 @@ +import pytest +import datetime + +from spytest import st, tgapi, SpyTestDict +from spytest.utils import filter_and_select + +import apis.routing.ip as ipfeature +import apis.system.port as papi +import apis.routing.bgp as 
bgpfeature +import apis.system.interface as interface_obj +import apis.system.basic as basic_obj +import apis.common.asic as asicapi +import apis.switching.vlan as vlan + +data = SpyTestDict() +data.as_num = 100 +data.remote_as_num = 200 +data.counters_threshold = 0 +data.my_ip_addr = "10.10.10.1" +data.neigh_ip_addr = "10.10.10.2" +data.intf_ip_addr = "20.20.20.1" +data.neigh_ip_addr2 = "20.20.20.2" +data.ip_prefixlen = "24" +data.my_ipv6_addr = "2000::1" +data.neigh_ipv6_addr = "2000::2" +data.intf_ipv6_addr = "3000::1" +data.neigh_ipv6_addr2 = "3000::2" +data.ipv6_prefixlen = "64" +data.test_bgp_route_count = 20000 +data.traffic_rate_pps = data.test_bgp_route_count +data.includeTraffic = False + +@pytest.fixture(scope="module", autouse=True) +def l3_performance_enhancements_module_hooks(request): + global vars, tg_handler, tg, dut, dut_to_tg_port_1, dut_to_tg_port_2, cli_type + global hwsku_under_test, def_v4_route_count, def_v6_route_count + + vars = st.ensure_min_topology("D1T1:2") + + # Initialize TG and TG port handlers + tg_handler = tgapi.get_handles(vars, [vars.T1D1P1, vars.T1D1P2]) + tg = tg_handler["tg"] + + if tgapi.is_soft_tgen(vars): + data.test_bgp_route_count = 200 + + # Test setup details + dut = vars.D1 + dut_to_tg_port_1 = vars.D1T1P1 + dut_to_tg_port_2 = vars.D1T1P2 + hwsku_under_test = basic_obj.get_hwsku(dut) + cli_type = st.get_ui_type(dut) + + # Module Configuration + st.log("L3 Performance Enhancements Module Configuration.") + # Configuring v4/v6 routing interfaces on the DUT. 
+ ipfeature.config_ipv6(dut, action='enable') + ipfeature.config_ip_addr_interface(dut, dut_to_tg_port_1, data.my_ip_addr, data.ip_prefixlen, family="ipv4") + ipfeature.config_ip_addr_interface(dut, dut_to_tg_port_1, data.my_ipv6_addr, data.ipv6_prefixlen, family="ipv6") + ipfeature.config_ip_addr_interface(dut, dut_to_tg_port_2, data.intf_ip_addr, data.ip_prefixlen, family="ipv4") + ipfeature.config_ip_addr_interface(dut, dut_to_tg_port_2, data.intf_ipv6_addr, data.ipv6_prefixlen, family="ipv6") + + # Configuring BGP router and v4/v6 neighbors on the DUT. + bgpfeature.create_bgp_router(dut, data.as_num, '') + bgpfeature.create_bgp_neighbor(dut, data.as_num, data.neigh_ip_addr, data.remote_as_num) + bgpfeature.create_bgp_neighbor(dut, data.as_num, data.neigh_ipv6_addr, data.remote_as_num, family="ipv6") + + # Get the default route count from DUT + def_v4_route_count = asicapi.get_ipv4_route_count(dut) + def_v6_route_count = asicapi.get_ipv6_route_count(dut) + + yield + # Module Cleanup + st.log("L3 Performance Enhancements Module Cleanup.") + ipfeature.delete_ip_interface(dut, dut_to_tg_port_1, data.my_ip_addr, data.ip_prefixlen, family="ipv4") + ipfeature.delete_ip_interface(dut, dut_to_tg_port_1, data.my_ipv6_addr, data.ipv6_prefixlen, family="ipv6") + ipfeature.delete_ip_interface(dut, dut_to_tg_port_2, data.intf_ip_addr, data.ip_prefixlen, family="ipv4") + ipfeature.delete_ip_interface(dut, dut_to_tg_port_2, data.intf_ipv6_addr, data.ipv6_prefixlen, family="ipv6") + bgpfeature.delete_bgp_neighbor(dut, data.as_num, data.neigh_ip_addr, data.remote_as_num) + bgpfeature.delete_bgp_neighbor(dut, data.as_num, data.neigh_ipv6_addr, data.remote_as_num) + bgpfeature.cleanup_router_bgp(dut) + +@pytest.fixture(scope="function", autouse=True) +def l3_performance_enhancements_func_hooks(request): + # Function configuration + yield + # Function cleanup + +def check_intf_traffic_counters(dut, loopCnt, case): + flag = 0 + iter = 1 + p1_rcvd = 0 + p2_txmt = 0 + + while iter <= 
loopCnt: + output = papi.get_interface_counters_all(dut) + for entry in output: + if entry["iface"] == dut_to_tg_port_2: + DUT_rx_value = entry["rx_bps"] + if entry["iface"] == dut_to_tg_port_1: + DUT_tx_value = entry["tx_bps"] + p1_rcvd = DUT_rx_value + p1_rcvd = p1_rcvd.replace(" MB/s","") + p1_rcvd = p1_rcvd.replace(" B/s","") + p1_rcvd = p1_rcvd.replace(" KB/s","") + p1_rcvd = p1_rcvd.replace(" GB/s","") + p1_rcvd = 0.0 if p1_rcvd == "" else p1_rcvd + p2_txmt = DUT_tx_value + p2_txmt = p2_txmt.replace(" MB/s","") + p2_txmt = p2_txmt.replace(" B/s","") + p2_txmt = p2_txmt.replace(" KB/s","") + p2_txmt = p2_txmt.replace(" GB/s","") + p2_txmt = 0.0 if p2_txmt == "" else p2_txmt + + st.log("rx_ok counter value on DUT Ingress port: {} and tx_ok xounter value on DUT Egress port : {}".format(p1_rcvd, p2_txmt)) + if cli_type == "klish": + st.log("Converting counters to bits by multiplying 1000000") + p2_txmt = int(float(p2_txmt)*1000000) + p1_rcvd = int(float(p1_rcvd)*1000000) + if case == "install": + if (int(float(p2_txmt)) == 0): + flag = 0 + break + + if (abs(int(float(p1_rcvd))-int(float(p2_txmt))) == data.counters_threshold): + flag = 1 + break + elif case == "withdraw": + if (int(float(p2_txmt))) == 0: + flag = 1 + break + iter = iter+1 + + if flag: + return True + else: + return False + +def check_bcmcmd_route_count(dut, loopCnt, ipType, defcount, expcount): + flag = 0 + iter = 1 + while iter <= loopCnt: + if ipType == "ipv4": + curr_count = asicapi.get_ipv4_route_count(dut) + elif ipType == "ipv6": + curr_count = asicapi.get_ipv6_route_count(dut) + + route_cnt = int(curr_count) - int(defcount) + + st.log("Learnt route count after iteration {} : {}".format(iter,route_cnt)) + + if int(route_cnt) == int(expcount): + flag = 1 + break + iter = iter+1 + + if flag: + return True + else: + return False + +def verify_bgp_route_count(dut,family='ipv4',shell="sonic",**kwargs): + if family.lower() == 'ipv4': + output = bgpfeature.show_bgp_ipv4_summary(dut) + if 
family.lower() == 'ipv6': + output = bgpfeature.show_bgp_ipv6_summary(dut) + st.debug(output) + if 'neighbor' in kwargs and 'state' in kwargs: + match = {'neighbor': kwargs['neighbor']} + try: + entries = filter_and_select(output, None, match)[0] + except Exception: + st.log("ERROR 1") + if entries['state']: + if kwargs['state'] == 'Established': + if entries['state'].isdigit(): + return entries['state'] + else: + return 0 + else: + return 0 + else: + return 0 + return 0 + +@pytest.fixture(scope="function") +def fixture_v4(request): + global h1, h2, bgp_rtr1 + st.log("Test Fixture Config.") + # TG ports reset + st.log("Resetting the TG ports") + tgapi.traffic_action_control(tg_handler, actions=['reset']) + + # TG protocol interface creation + st.log("TG protocol interface creation") + h1 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_1"], mode='config', + intf_ip_addr=data.neigh_ip_addr,gateway=data.my_ip_addr,arp_send_req='1') + st.log("INTFCONF: "+str(h1)) + h2 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_2"], mode='config', + intf_ip_addr=data.neigh_ip_addr2,gateway=data.intf_ip_addr,arp_send_req='1') + st.log("INTFCONF: "+str(h2)) + + # Configuring BGP on TG interface + conf_var = {'mode':'enable', 'active_connect_enable':'1', 'local_as':data.remote_as_num, 'remote_as':data.as_num, 'remote_ip_addr':data.my_ip_addr} + route_var = {'mode':'add', 'num_routes':data.test_bgp_route_count, 'prefix':'121.1.1.0', 'as_path':'as_seq:1'} + ctrl_start = {'mode':'start'} + + # Starting the BGP router on TG. 
+ bgp_rtr1 = tgapi.tg_bgp_config(tg=tg, handle=h1['handle'], conf_var=conf_var, route_var = route_var, ctrl_var=ctrl_start) + st.log("BGP_HANDLE: "+str(bgp_rtr1)) + + # Verifying the BGP neighborship + st.wait(10) + st.log("Verifying the BGP neighborship.") + if not bgpfeature.verify_bgp_summary(dut, neighbor=data.neigh_ip_addr, state='Established'): + st.report_fail("bgp_ip_peer_establish_fail", data.neigh_ip_addr) + + yield + st.log("Test Fixture Cleanup.") + tg.tg_interface_config(port_handle=tg_handler["tg_ph_1"], handle=h1['handle'], mode='destroy') + tg.tg_interface_config(port_handle=tg_handler["tg_ph_2"], handle=h2['handle'], mode='destroy') + +@pytest.fixture(scope="function") +def fixture_v6(request): + global h1, h2, bgp_rtr1, bgp_rtr2 + st.log("Test Fixture Config.") + # TG ports reset + st.log("Resetting the TG ports") + tgapi.traffic_action_control(tg_handler, actions=['reset']) + + # TG protocol interface creation + st.log("TG protocol interface creation") + h1 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_1"], mode='config', + ipv6_intf_addr=data.neigh_ipv6_addr,ipv6_prefix_length=64, + ipv6_gateway=data.my_ipv6_addr,arp_send_req='1') + st.log("INTFCONF: "+str(h1)) + h2 = tg.tg_interface_config(port_handle=tg_handler["tg_ph_2"], mode='config', + ipv6_intf_addr=data.neigh_ipv6_addr2,ipv6_prefix_length=64, + ipv6_gateway=data.intf_ipv6_addr,arp_send_req='1') + st.log("INTFCONF: "+str(h2)) + + # Configuring BGP on TG interface + conf_var = {'mode':'enable', 'ip_version':'6', 'active_connect_enable':'1', 'local_as':data.remote_as_num, 'remote_as':data.as_num, 'remote_ipv6_addr':data.my_ipv6_addr} + route_var = {'mode':'add', 'ip_version':'6', 'num_routes':data.test_bgp_route_count, 'prefix':'3300:1::', 'as_path':'as_seq:1'} + ctrl_start = {'mode':'start'} + + # Starting the BGP router on TG. 
+ bgp_rtr2 = tgapi.tg_bgp_config(tg=tg, handle=h1['handle'], conf_var=conf_var, route_var = route_var, ctrl_var=ctrl_start) + st.log("BGP_HANDLE: "+str(bgp_rtr2)) + + # Verifying the BGP neighborship + st.wait(10) + st.log("Verifying the BGP neighborship.") + if not bgpfeature.verify_bgp_summary(dut, family='ipv6', neighbor=data.neigh_ipv6_addr, state='Established'): + st.report_fail("bgp_ip6_peer_establish_fail", data.neigh_ip_addr) + + yield + st.log("Test Fixture Cleanup.") + tg.tg_interface_config(port_handle=tg_handler["tg_ph_1"], handle=h1['handle'], mode='destroy') + tg.tg_interface_config(port_handle=tg_handler["tg_ph_2"], handle=h2['handle'], mode='destroy') + + +def show_ip_route_validation_cli(type='click'): + st.log("{} validation".format(type)) + test_case = 'FtOpSoRtPerfFn053' if type == 'click' else 'FtOpSoRtPerfFn052' + start_time = datetime.datetime.now() + if "via" not in st.show(dut, "show ip route", type=type, skip_tmpl=True, max_time=300): + st.report_tc_fail(test_case, 'test_case_failed') + end_time = datetime.datetime.now() + st.log("start_time for route display using {}: {} ".format(type,start_time)) + st.log("end_time for route display using {}: {} ".format(type,end_time)) + time_diff_in_secs = end_time - start_time + st.log("time_diff_in_secs: {}".format(time_diff_in_secs)) + st.report_tc_pass(test_case,'test_case_passed') + +def bgp_router_cli_validation(dut,type = "vtysh"): + cmd = ['router bgp 100'] + for each in range(101,121): + if type == 'vtysh': + cmd.append('neighbor 192.168.{}.2 remote-as 300'.format(each)) + else: + cmd.extend(['neighbor 192.168.{}.2'.format(each), 'remote-as 300', 'exit']) + cmd.append('exit') + cmd.append('no router bgp') + if type == "vtysh": + for each in cmd: + st.vtysh_config(dut,each) + else: + st.config(dut, cmd, type=type, skip_error_check=True) + +@pytest.mark.test_ft_l3_performance_enhancements_v4_route_intstall_withdraw +def test_ft_l3_performance_enhancements_v4_route_intstall_withdraw(fixture_v4): 
+ ################# Author Details ################ + # Name: Rakesh Kumar Vooturi + # Email: rakesh-kumar.vooturi@broadcom.com + ################################################# + # + # Objective - FtOpSoRtPerfFn025 : Performance measurement for IP route installation in hardware + # Measure time taken for routes to get installed into the hardware + # (with fresh advertisement of routes ...but not due to any other trigger). + # Objective - FtOpSoRtPerfFn026 : Performance measurement for IP route withdraw in hardware + # Measure time taken for routes to get removed from the hardware by stop advertising the Routes from neighbor. + # + ############### Test bed details ################ + # TG --- DUT --- TG + ################################################# + # Withdraw the routes. + ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr1['conf']['handle'], route_handle=bgp_rtr1['route'][0]['handle'], mode='withdraw') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, 0): + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != 0: + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + if data.includeTraffic: + # Configuring traffic stream on the TG interface + tr1=tg.tg_traffic_config(port_handle=tg_handler["tg_ph_2"], emulation_src_handle=h2['handle'], + emulation_dst_handle=bgp_rtr1['route'][0]['handle'], circuit_endpoint_type='ipv4', + mode='create', transmit_mode='continuous', length_mode='fixed', + rate_pps=data.traffic_rate_pps, enable_stream_only_gen='0') + + # Starting the TG traffic after clearing the DUT counters + papi.clear_interface_counters(dut) + tg.tg_traffic_control(action="run",handle=tr1['stream_id']) + + st.banner("Measuring time taken for route 
installation of {} ipv4 routes on HWSKU {}".format(data.test_bgp_route_count, hwsku_under_test)) + + # Taking the start time timestamp + #start_time = datetime.datetime.now() + + # Readvertise the routes. + ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr1['conf']['handle'], route_handle=bgp_rtr1['route'][0]['handle'], mode='readvertise') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + result = True + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, data.test_bgp_route_count): + st.error("route_table_not_updated_by_advertise_from_tg") + result = False + + if data.includeTraffic: + st.wait(25) + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "install"): + st.error("ingress_traffic_rate_not_matching_with_egress_rate") + result = False + if result: + st.report_tc_pass("FtOpSoRtPerfFn025","test_case_passed") + else: + st.report_tc_fail("FtOpSoRtPerfFn025","ingress_traffic_rate_not_matching_with_egress_rate") + + # Taking the end time timestamp + #end_time = datetime.datetime.now() + + # verifying click cli show ip route validation + show_ip_route_validation_cli('click') + st.banner("verifying vtysh cli show ip route") + start_time = datetime.datetime.now() + st.show(dut, "show ip route", type="vtysh", skip_tmpl=True, max_time=300) + end_time = datetime.datetime.now() + time_diff_in_secs = end_time - start_time + st.banner("time_diff_in_secs for route display using vtysh: {} ".format(time_diff_in_secs)) + + #verifying klish cli show ip route validation + if st.is_feature_supported("klish"): + show_ip_route_validation_cli('klish') + + # Verify the total route count using SONiC CLI + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Time taken for route installation + 
st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Time taken for intsalling {} v4 routes on HWSKU {} = ".format(data.test_bgp_route_count,hwsku_under_test) +str(time_in_secs.seconds)) + st.banner("Measuring time taken for route withdraw of {} ipv4 routes on HWSKU {}".format(data.test_bgp_route_count,hwsku_under_test)) + + # Taking the start time timestamp + start_time = datetime.datetime.now() + + # Withdraw the routes. + ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr1['conf']['handle'], route_handle=bgp_rtr1['route'][0]['handle'], mode='withdraw') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, 0): + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + if data.includeTraffic: + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "withdraw"): + st.report_fail("egress_traffic_rate_not_zero") + + # Taking the end time timestamp + end_time = datetime.datetime.now() + + # Verify the total route count using SONiC CLI + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != 0: + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + # Time taken for route withdraw + st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Time taken for withdrawing {} v4 routes on HWSKU {} = ".format(data.test_bgp_route_count,hwsku_under_test) +str(time_in_secs.seconds)) + + # Stopping the TG traffic + if data.includeTraffic: + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.report_pass("test_case_passed") + + +def test_ft_l3_performance_enhancements_v6_route_intstall_withdraw(fixture_v6): + ################# Author Details 
################ + # Name: Rakesh Kumar Vooturi + # Email: rakesh-kumar.vooturi@broadcom.com + ################################################# + # + # Objective - FtOpSoRtPerfFn042 : Performance measurement for IPv6 route installation in hardware + # Measure time taken for routes to get installed into the hardware + # (with fresh advertisement of routes ...but not due to any other trigger). + # Objective - FtOpSoRtPerfFn043 : Performance measurement for IPv6 route withdraw in hardware + # Measure time taken for routes to get removed from the hardware by stop advertising the Routes from neighbor. + # + ############### Test bed details ################ + # TG --- DUT --- TG + ################################################# + # Withdraw the routes. + ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr2['conf']['handle'], route_handle=bgp_rtr2['route'][0]['handle'], mode='withdraw') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv6", def_v6_route_count, 0): + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv6', neighbor=data.neigh_ipv6_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != 0: + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + if data.includeTraffic: + # Configuring traffic stream on the TG interface + tr2=tg.tg_traffic_config(port_handle=tg_handler["tg_ph_2"], emulation_src_handle=h2['handle'], + emulation_dst_handle=bgp_rtr2['route'][0]['handle'], circuit_endpoint_type='ipv6', + mode='create', transmit_mode='continuous', length_mode='fixed', + rate_pps=data.traffic_rate_pps, enable_stream_only_gen='0') + + # Starting the TG traffic after clearing the DUT counters + papi.clear_interface_counters(dut) + tg.tg_traffic_control(action="run",handle=tr2['stream_id']) + + st.banner("# Measuring time taken for route installation of {} ipv6 
routes on HWSKU {}".format(data.test_bgp_route_count,hwsku_under_test)) + + # Taking the start time timestamp + start_time = datetime.datetime.now() + + # Readvertise the routes. + ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr2['conf']['handle'], route_handle=bgp_rtr2['route'][0]['handle'], mode='readvertise') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv6", def_v6_route_count, data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + if data.includeTraffic: + st.wait(25) + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "install"): + st.report_fail("ingress_traffic_rate_not_matching_with_egress_rate") + + # Taking the end time timestamp + end_time = datetime.datetime.now() + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv6', neighbor=data.neigh_ipv6_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Time taken for route installation + st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Time taken for intsalling {} v6 routes on HWSKU {} = ".format(data.test_bgp_route_count,hwsku_under_test) +str(time_in_secs.seconds)) + st.banner("# Measuring time taken for route withdraw of {} ipv4 routes on HWSKU {}".format(data.test_bgp_route_count,hwsku_under_test)) + + # Taking the start time timestamp + start_time = datetime.datetime.now() + + # Withdraw the routes. 
+ ctrl1=tg.tg_bgp_routes_control(handle=bgp_rtr2['conf']['handle'], route_handle=bgp_rtr2['route'][0]['handle'], mode='withdraw') + st.log("TR_CTRL: "+str(ctrl1)) + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv6", def_v6_route_count, 0): + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + if data.includeTraffic: + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "withdraw"): + st.report_fail("egress_traffic_rate_not_zero") + + # Taking the end time timestamp + end_time = datetime.datetime.now() + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv6', neighbor=data.neigh_ipv6_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != 0: + st.report_fail("route_table_not_cleared_by_withdraw_from_tg") + + # Time taken for route withdraw + st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Time taken for withdrawing {} v6 routes on HWSKU {} = ".format(data.test_bgp_route_count,hwsku_under_test) +str(time_in_secs.seconds)) + + if data.includeTraffic: + # Stopping the TG traffic + tg.tg_traffic_control(action='stop', handle=tr2['stream_id']) + st.report_pass("test_case_passed") + +def test_ft_l3_performance_enhancements_v4_bgp_link_flap_convergence_time(fixture_v4): + ################# Author Details ################ + # Name: Rakesh Kumar Vooturi + # Email: rakesh-kumar.vooturi@broadcom.com + ################################################# + # + # Objective - FtOpSoRtPerfFn031 : Performance measurement for BGP Link Flap case + # Measure time taken for the traffic ( corresponding to BGP routes ) + # to resume when the link carrying traffic ( corresponding to BGP routes ) + # goes down and then comes up. + # Measurement is taken from the time of link up + # (i.e link up after link going down) to the time of RX rate = TX rate. 
+ # + ############### Test bed details ################ + # TG --- DUT --- TG + ################################################# + # Configuring traffic stream on the TG interface + tr1=tg.tg_traffic_config(port_handle=tg_handler["tg_ph_2"], emulation_src_handle=h2['handle'], + emulation_dst_handle=bgp_rtr1['route'][0]['handle'], + circuit_endpoint_type='ipv4', mode='create', transmit_mode='continuous', + length_mode='fixed', rate_pps=data.traffic_rate_pps, enable_stream_only_gen='0') + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Starting the TG traffic after clearing the DUT counters + papi.clear_interface_counters(dut) + tg.tg_traffic_control(action="run",handle=tr1['stream_id']) + + st.wait(25) + + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "install"): + st.report_fail("ingress_traffic_rate_not_matching_with_egress_rate") + + # Shutdown the routing interface link. + st.log("Shutdown the routing interface links.") + if not interface_obj.interface_operation(dut, dut_to_tg_port_1 , "shutdown"): + st.report_fail('interface_admin_shut_down_fail', dut_to_tg_port_1) + + if not interface_obj.poll_for_interface_status(dut, dut_to_tg_port_1, 'oper', 'down', iteration=10, delay=1): + st.report_fail('interface_admin_shut_down_fail', dut_to_tg_port_1) + + st.banner("# Measuring Convergence time ( control plane + data plane ) taken for BGP Link Flap scenario on HWSKU {}".format(hwsku_under_test)) + + # Startup the routing interface link. 
+ st.log("Startup the routing interface link.") + if not interface_obj.interface_operation(dut, dut_to_tg_port_1, "startup"): + st.report_fail('interface_admin_startup_fail', dut_to_tg_port_1) + + if not interface_obj.poll_for_interface_status(dut, dut_to_tg_port_1, 'oper', 'up', iteration=10, delay=1): + st.report_fail('interface_admin_startup_fail', dut_to_tg_port_1) + + # Taking the start time timestamp + start_time = datetime.datetime.now() + bgpfeature.config_bgp(dut=vars.D1, local_as=data.as_num, config='yes', neighbor=data.neigh_ip_addr, config_type_list=["connect"], connect='1') + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Taking the end time timestamp + end_time = datetime.datetime.now() + + # Time taken for route installation + st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Convergence time ( control plane + data plane ) taken for BGP Link Flap scenario (After Link up, time taken for BGP to re establish, learn and install the routes to hardware) on HWSKU {} = ".format(hwsku_under_test) +str(time_in_secs.seconds)) + + # Stopping the TG traffic + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.report_pass("test_case_passed") + +def test_ft_l3_performance_enhancements_v4_bgp_session_failover_convergence_time(fixture_v4): + ################# Author Details ################ + # Name: Rakesh Kumar Vooturi + # Email: rakesh-kumar.vooturi@broadcom.com + 
################################################# + # + # Objective - FtOpSoRtPerfFn032 : Performance measurement for BGP Session Failover case + # Measure time taken for the traffic ( corresponding to BGP routes ) to resume fully + # (i.e RX rate = TX rate) after the BGP session flaps ( i.e BGP session goes down and then comes up + # This is triggered using "clear ip bgp * " CLI command ). + # + ############### Test bed details ################ + # TG --- DUT --- TG + ################################################# + # Configuring traffic stream on the TG interface + tr1=tg.tg_traffic_config(port_handle=tg_handler["tg_ph_2"], emulation_src_handle=h2['handle'], + emulation_dst_handle=bgp_rtr1['route'][0]['handle'], circuit_endpoint_type='ipv4', + mode='create', transmit_mode='continuous', length_mode='fixed', + rate_pps=data.traffic_rate_pps, enable_stream_only_gen='0') + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Starting the TG traffic after clearing the DUT counters + papi.clear_interface_counters(dut) + tg.tg_traffic_control(action="run",handle=tr1['stream_id']) + + st.wait(25) + + # Verifying the BGP route count on the DUT + if not check_intf_traffic_counters(dut, 20, "install"): + st.report_fail("ingress_traffic_rate_not_matching_with_egress_rate") + + # Clearing the BGP session. 
+ st.log("Clearing the BGP session.") + bgpfeature.clear_ip_bgp(dut) + + st.banner("# Measuring Convergence time ( control plane + data plane ) taken for BGP session failover scenario on HWSKU {}".format(hwsku_under_test)) + # Taking the start time timestamp + start_time = datetime.datetime.now() + bgpfeature.config_bgp(dut=vars.D1, local_as=data.as_num, config='yes', neighbor=data.neigh_ip_addr, config_type_list=["connect"], connect='1') + + # Verify the total route count using bcmcmd + if not check_bcmcmd_route_count(dut, 50, "ipv4", def_v4_route_count, data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Verify the total route count + count = verify_bgp_route_count(dut, family='ipv4', neighbor=data.neigh_ip_addr, state='Established') + st.log("Route count: "+str(count)) + if int(count) != int(data.test_bgp_route_count): + st.report_fail("route_table_not_updated_by_advertise_from_tg") + + # Taking the end time timestamp + end_time = datetime.datetime.now() + + # Time taken for route installation + st.log("Start Time: {}".format(start_time)) + st.log("End Time: {}".format(end_time)) + time_in_secs = end_time - start_time + + st.banner("Convergence time ( control plane + data plane ) taken for BGP session failover scenario (After session up, time taken for BGP to re establish, learn and install the routes to hardware) on HWSKU {} = ".format(hwsku_under_test) +str(time_in_secs.seconds)) + + # Stopping the TG traffic + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.report_pass("test_case_passed") + +@pytest.mark.test_cli_validation_ip_address +def test_cli_validation_ip_address(): + vlan.config_vlan_range(dut, vlan_range = "101 121", config="add", skip_verify=False) + st.banner("click cli validation for ip address config") + start_time = datetime.datetime.now() + st.log("IP address config on 20 vlan routing interface using click") + for each in range(101,121): + cmd = ["config interface ip add Vlan{} 
192.168.{}.1/31".format(each, each)] + st.config(dut, cmd, type="click") + end_time = datetime.datetime.now() + time_diff = end_time - start_time + st.banner("time taken for IP address config on 20 vlan routing interface using click: {}".format(time_diff)) + + st.log("IP address unconfig on 20 vlan routing interface using click") + for each in range(101, 121): + cmd = ["config interface ip remove Vlan{} 192.168.{}.1/31".format(each, each)] + st.config(dut, cmd, type="click") + st.report_tc_pass("FtOpSoRtPerfFn054","test_case_passed") + + if st.is_feature_supported("klish"): + st.banner("klish cli validation for ip address config") + start_time = datetime.datetime.now() + for each in range(101, 121): + cmd = ["interface Vlan {}".format(each), 'ip address 192.168.{}.1/31'.format(each), 'exit'] + st.config(dut, cmd, type="klish") + end_time = datetime.datetime.now() + time_diff = end_time - start_time + st.banner("time taken for IP address config on 20 vlan routing interface using klish: {}".format(time_diff)) + for each in range(101, 121): + cmd = ["interface Vlan {}".format(each), 'no ip address 192.168.{}.1/31'.format(each), 'exit'] + st.config(dut, cmd, type="klish") + st.report_tc_pass("FtOpSoRtPerfFn055", "test_case_passed") + st.report_pass("test_case_passed") + + +@pytest.mark.test_cli_validation_bgp_router_config +def test_cli_validation_bgp_router_config(): + st.banner("vtysh cli validation for bgp router config") + start_time = datetime.datetime.now() + bgp_router_cli_validation(dut, type="vtysh") + end_time = datetime.datetime.now() + time_diff = end_time - start_time + st.banner("time taken for BGP router config on 20 vlan routing interface using vtysh: {}".format(time_diff)) + st.report_tc_pass("FtOpSoRtPerfFn056", "test_case_passed") + + if st.is_feature_supported("klish"): + st.banner("klish cli validation for bgp router config") + start_time = datetime.datetime.now() + bgp_router_cli_validation(dut, type="klish") + end_time = datetime.datetime.now() + 
time_diff = end_time - start_time + st.banner("time taken for BGP router config on 20 vlan routing interface using klish: {}".format(time_diff)) + st.report_tc_pass("FtOpSoRtPerfFn057", "test_case_passed") + st.report_pass("test_case_passed") + diff --git a/spytest/tests/routing/test_l3_scale_ecmp_paths.py b/spytest/tests/routing/test_l3_scale_ecmp_paths.py index 5292c987cfe..3eb2c3a4fed 100644 --- a/spytest/tests/routing/test_l3_scale_ecmp_paths.py +++ b/spytest/tests/routing/test_l3_scale_ecmp_paths.py @@ -147,7 +147,7 @@ def check_end_to_end_intf_traffic_counters(): p1_txmt = p1_txmt.replace(" KB/s","") p1_txmt = p1_txmt.replace(" B/s","") if (abs(int(float(p1_txmt))) == 0): - output = st.show(dut1, "show arp") + st.show(dut1, "show arp") st.error("End to End traffic is Zero") return False else: @@ -224,7 +224,7 @@ def check_inter_dut_intf_traffic_counters(): st.log("Inter Dut port stats tx_ok xounter value on DUT Egress ports : {} {} {} {}".format(p1_txmt, p2_txmt, p3_txmt, p4_txmt)) if (abs(int(float(p1_txmt))) == 0) | (abs(int(float(p2_txmt))) == 0) | (abs(int(float(p3_txmt))) == 0) | (abs(int(float(p4_txmt))) == 0): - output = st.show(dut1, "show arp") + st.show(dut1, "show arp") st.error("Inter Dut port stats tx_ok xounter value on DUT Egress ports are non zero") return False else: @@ -273,7 +273,7 @@ def create_bgp_neighbor_config(dut, local_asn, neighbor_ip, remote_asn, routemap st.vtysh_config(dut, command) command = "neighbor {} route-map {} in".format(neighbor_ip, routemap) st.vtysh_config(dut, command) - command = "neighbor {} route-map {} out".format(neighbor_ip, routemap) + #command = "neighbor {} route-map {} out".format(neighbor_ip, routemap) return True @@ -303,7 +303,7 @@ def check_intf_traffic_counters(): p2_txmt = p2_txmt.replace(" B/s","") st.log("RETRY tx_ok xounter value on DUT Egress port : {}".format(p2_txmt)) if (abs(int(float(p2_txmt))) == 0): - output = st.show(dut1, "show arp") + st.show(dut1, "show arp") asicapi.dump_l3_egress(dut1) 
return False else: @@ -362,7 +362,7 @@ def check_intf_traffic_bo_counters(): st.log("RETRY tx_ok xounter value on DUT Egress ports : {} {} {} {}".format(p1_txmt, p2_txmt, p3_txmt, p4_txmt)) if (abs(int(float(p2_txmt))) == 0) | (abs(int(float(p3_txmt))) == 0) | (abs(int(float(p4_txmt))) == 0): - output = st.show(dut1, "show arp") + st.show(dut1, "show arp") return False else: return True @@ -478,8 +478,8 @@ def l3_max_route_max_path_scaling_tc(max_paths, max_routes, use_config_file, fam bgpfeature.create_bgp_router(dut2, data.new_as_num, '') dut1_neigh_ip_addr = data.neigh_ip_addr dut2_neigh_ip_addr = data.dut1_start_ip_addr - formatted_dut1_neigh_ip_addr = dut1_neigh_ip_addr.replace("/24","") - formatted_dut2_neigh_ip_addr = dut2_neigh_ip_addr.replace("/24","") + #formatted_dut1_neigh_ip_addr = dut1_neigh_ip_addr.replace("/24","") + #formatted_dut2_neigh_ip_addr = dut2_neigh_ip_addr.replace("/24","") formatted_dut1_neigh_ip_addr = "10.2.2.3" bgpfeature.create_bgp_neighbor(dut, data.as_num, formatted_dut1_neigh_ip_addr, data.remote_as_num) @@ -1010,7 +1010,7 @@ def test_l3_ecmp_4paths_on_bo_tc(): st.config(dut, command) command = "config vlan member add 104 {}".format(member4) st.config(dut, command) - ip_addr = data.start_ip_addr + #ip_addr = data.start_ip_addr ip_addr = "10.2.100.1/24" for index in range(base_range, max_range): command = "config interface ip add "+ "Vlan" + str(index) + " " + ip_addr diff --git a/spytest/tests/sanity/test_sanity_l2.py b/spytest/tests/sanity/test_sanity_l2.py index be5583c09e0..c3953f73941 100644 --- a/spytest/tests/sanity/test_sanity_l2.py +++ b/spytest/tests/sanity/test_sanity_l2.py @@ -595,73 +595,111 @@ def test_base_line_vlan_create_delete_and_mac_learning_with_bum(): "Unknown": data.unknown_mac} mac_incr_cnt = 7 tg_info = {} - new_src_mac = [] - st.log("Reseting and creating the traffic stream.") - tg1.tg_traffic_control(port_handle=tg_ph_1, action='reset') - for mac_addr in mac_addr_list: - st.log("Start '{}' traffic test 
with destination MAC = {}".format(mac_addr, mac_addr_list[mac_addr])) - new_src_mac.append("00:00:00:00:09:4{}".format(mac_incr_cnt)) - - rv = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', rate_pps=data.rate_pps, - mac_src=new_src_mac[-1], vlan_id=data.vlan, vlan="enable", - transmit_mode="continuous", mac_dst=mac_addr_list[mac_addr], - l2_encap='ethernet_ii_vlan', port_handle2=[tg_ph_1, tg_ph_2, tg_ph_3]) - tg_info[mac_addr] = rv['stream_id'] - mac_incr_cnt += 1 - - st.log("Sending traffic and verifying the stats") - tg1.tg_traffic_control(action='clear_stats', stream_handle=tg_info.values()) - tg1.tg_traffic_control(stream_handle=tg_info.values(), action='run') - st.wait(data.post_wait_time_run) - tg1.tg_traffic_control(stream_handle=tg_info.values(), action='stop') - - traffic_details = { - '1': { - 'tx_ports': [vars.T1D1P1], - 'tx_obj': [tg1], - 'exp_ratio': [2], - 'rx_ports': [vars.T1D1P2, vars.T1D1P3], - 'rx_obj': [tg1, tg1], - 'stream_list': [[tg_info['Unknown']]] - }, - '2': { - 'tx_ports': [vars.T1D1P1], - 'tx_obj': [tg1], - 'exp_ratio': [2], - 'rx_ports': [vars.T1D1P2, vars.T1D1P3], - 'rx_obj': [tg1, tg1], - 'stream_list': [[tg_info['Broadcast']]] - }, - '3': { - 'tx_ports': [vars.T1D1P1], - 'tx_obj': [tg1], - 'exp_ratio': [2], - 'rx_ports': [vars.T1D1P2, vars.T1D1P3], - 'rx_obj': [tg1, tg1], - 'stream_list': [[tg_info['Multicast']]] - } - } - - intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P1) - intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P2) - intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P3) - - st.log('MAC address validation.') - for mac in new_src_mac: - if not mac_obj.verify_mac_address_table(vars.D1, mac, port=vars.D1T1P1): - st.error("MAC '{}' is failed to learn in port = {}".format(mac, vars.D1T1P1)) - assert False - st.log('MAC address validation passed.') - - # Sending BUM traffic from one port and verifying on other two ports. 
Providing rx_ports as a input to port_handle2 param, the rx count would be double the tx count - result_all = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock',comp_type='packet_count',return_all=1) result = True - for result1, traffic_type in zip(result_all[1], ['Unknown', 'Broadcast', 'Multicast']): - result = result and result1 - if result1: - st.log("Traffic successfully forwarded for the traffic type {}".format(traffic_type)) - else: - st.error("Traffic failed to forwarded for the traffic type {}".format(traffic_type)) + if st.is_soft_tgen(): + for mac_addr in mac_addr_list: + st.log( + "Start '{}' traffic test with destination MAC = {}".format(mac_addr, mac_addr_list[mac_addr])) + new_src_mac = ("00:00:00:00:09:4{}".format(mac_incr_cnt)) + tg1.tg_traffic_control(action='reset') + rv = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', rate_pps=data.rate_pps, + mac_src=new_src_mac, vlan_id=data.vlan, vlan="enable", + transmit_mode="continuous", mac_dst=mac_addr_list[mac_addr], + l2_encap='ethernet_ii_vlan') + tg_info[mac_addr] = rv['stream_id'] + mac_incr_cnt += 1 + tg1.tg_traffic_control(action='clear_stats') + st.log("Sending traffic and verifying the stats") + tg1.tg_traffic_control(stream_handle=tg_info[mac_addr], action='run') + st.wait(data.post_wait_time_run) + tg1.tg_traffic_control(stream_handle=tg_info[mac_addr], action='stop') + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg1], + 'exp_ratio': [1], + 'rx_ports': [vars.T1D1P2, vars.T1D1P3], + 'rx_obj': [tg1, tg1] + } + } + intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P1) + intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P2) + intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P3) + st.log('MAC address validation.') + if not mac_obj.verify_mac_address_table(vars.D1, new_src_mac, port=vars.D1T1P1): + st.error("MAC '{}' is failed to learn in port = {}".format(new_src_mac, vars.D1T1P1)) + assert False + 
st.log('MAC address validation passed.') + result1 = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='aggregate', + comp_type='packet_count') + result = result and result1 + if result1: + st.log("Traffic successfully forwarded for the traffic type {}".format(mac_addr)) + else: + st.error("Traffic failed to forwarded for the traffic type {}".format(mac_addr)) + else: + new_src_mac = [] + st.log("Reseting and creating the traffic stream.") + tg1.tg_traffic_control(port_handle=tg_ph_1, action='reset') + for mac_addr in mac_addr_list: + st.log( + "Start '{}' traffic test with destination MAC = {}".format(mac_addr, mac_addr_list[mac_addr])) + new_src_mac.append("00:00:00:00:09:4{}".format(mac_incr_cnt)) + rv = tg1.tg_traffic_config(port_handle=tg_ph_1, mode='create', rate_pps=data.rate_pps, + mac_src=new_src_mac[-1], vlan_id=data.vlan, vlan="enable", + transmit_mode="continuous", mac_dst=mac_addr_list[mac_addr], + l2_encap='ethernet_ii_vlan', port_handle2=[tg_ph_1, tg_ph_2, tg_ph_3]) + tg_info[mac_addr] = rv['stream_id'] + mac_incr_cnt += 1 + st.log("Sending traffic and verifying the stats") + tg1.tg_traffic_control(action='clear_stats', stream_handle=tg_info.values()) + tg1.tg_traffic_control(stream_handle=tg_info.values(), action='run') + st.wait(data.post_wait_time_run) + tg1.tg_traffic_control(stream_handle=tg_info.values(), action='stop') + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg1], + 'exp_ratio': [2], + 'rx_ports': [vars.T1D1P2, vars.T1D1P3], + 'rx_obj': [tg1, tg1], + 'stream_list': [[tg_info['Unknown']]] + }, + '2': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg1], + 'exp_ratio': [2], + 'rx_ports': [vars.T1D1P2, vars.T1D1P3], + 'rx_obj': [tg1, tg1], + 'stream_list': [[tg_info['Broadcast']]] + }, + '3': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [tg1], + 'exp_ratio': [2], + 'rx_ports': [vars.T1D1P2, vars.T1D1P3], + 'rx_obj': [tg1, tg1], + 'stream_list': [[tg_info['Multicast']]] + } + } + 
intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P1) + intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P2) + intf_obj.show_specific_interface_counters(vars.D1, vars.D1T1P3) + st.log('MAC address validation.') + for mac in new_src_mac: + if not mac_obj.verify_mac_address_table(vars.D1, mac, port=vars.D1T1P1): + st.error("MAC '{}' is failed to learn in port = {}".format(mac, vars.D1T1P1)) + assert False + st.log('MAC address validation passed.') + # Sending BUM traffic from one port and verifying on other two ports. Providing rx_ports as a input to port_handle2 param, the rx count would be double the tx count + result_all = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', + comp_type='packet_count', return_all=1) + for result1, traffic_type in zip(result_all[1], ['Unknown', 'Broadcast', 'Multicast']): + result = result and result1 + if result1: + st.log("Traffic successfully forwarded for the traffic type {}".format(traffic_type)) + else: + st.error("Traffic failed to forwarded for the traffic type {}".format(traffic_type)) if not result: st.error("traffic_verification_failed") assert False diff --git a/spytest/tests/security/test_security_save_reboot.py b/spytest/tests/security/test_security_save_reboot.py index fad32ad7ec8..1452f2ea722 100644 --- a/spytest/tests/security/test_security_save_reboot.py +++ b/spytest/tests/security/test_security_save_reboot.py @@ -3,9 +3,9 @@ from spytest.dicts import SpyTestDict import apis.security.radius as radius import apis.security.tacacs as tacacs +from utilities.utils import ensure_service_params import apis.system.reboot as reboot import apis.system.switch_configuration as switchconf -from utilities.utils import ensure_service_params security_data = SpyTestDict() diff --git a/spytest/tests/security/test_tacacs.py b/spytest/tests/security/test_tacacs.py index 4bf7faadec6..dd82c778572 100644 --- a/spytest/tests/security/test_tacacs.py +++ 
b/spytest/tests/security/test_tacacs.py @@ -8,6 +8,7 @@ from apis.security.rbac import ssh_call from apis.switching.vlan import clear_vlan_configuration from utilities.utils import ensure_service_params +from utilities.common import poll_wait vars = dict() data = SpyTestDict() @@ -28,8 +29,8 @@ def tacacs_module_hooks(request): data.priority = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "priority") data.timeout = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "timeout") data.auth_type = ensure_service_params(vars.D1, "tacacs", "hosts", 0, "auth_type") - data.tacacs_ser_ip_2 = ensure_service_params(vars.D1, "tacacs", "hosts", 1, "ip") - data.priority_server2 = ensure_service_params(vars.D1, "tacacs", "hosts", 1, "priority") + data.tacacs_ser_ip_2 = ensure_service_params(vars.D1, "tacacs", "hosts", 2, "ip") + data.priority_server2 = ensure_service_params(vars.D1, "tacacs", "hosts", 2, "priority") data.time_out = '10' data.username = 'test' data.password = 'test' @@ -44,6 +45,7 @@ def tacacs_module_hooks(request): data.password1 = 'test' data.rw_user = {'username': data.username, 'password': data.password, 'mode': 'rw'} data.ro_username = ensure_service_params(vars.D1, "radius", "ro_user", "username") + data.ro_password = ensure_service_params(vars.D1, "radius", "ro_user", "password1") ensure_device_ipaddress() tacacs_obj.set_tacacs_server(vars.D1, 'add', data.tacacs_ser_ip_1, data.tcp_port, data.timeout, data.passkey, data.auth_type, data.priority) @@ -147,7 +149,7 @@ def test_ft_tacacs_enable_disable_failthrough(): st.log( "Trying to SSH to the device using TACACS+ credetails when login method set to TACACS+ and local and fail through mode is not enabled") if not ssh_obj.connect_to_device(data.ip_address, data.username, data.password, data.protocol): - debug_info("test_ft_tacacs_enable_disable_failthrough", data.tacacs_ser_ip_1) + debug_info("test_ft_tacacs_enable_disable_failthrough", data.tacacs_ser_ip_2) 
st.report_fail("Login_to_DUT_via_SSH_is_failed") st.log("Setting login authentication to local and tacacs+") tacacs_obj.set_aaa_authentication_properties(vars.D1, 'login', 'local tacacs+', username=data.username, password=data.password) @@ -174,7 +176,7 @@ def test_ft_tacacs_enable_disable_failthrough(): st.log( "Trying to SSH to the device using TACACS+ credetails when login method set to TACACS+ and local and fail through mode is enabled") if not ssh_obj.connect_to_device(data.ip_address, data.username, data.password, data.protocol, data.ssh_port): - debug_info("test_ft_tacacs_enable_disable_failthrough", data.tacacs_ser_ip_1) + debug_info("test_ft_tacacs_enable_disable_failthrough", data.tacacs_ser_ip_2) st.report_fail("Login_to_DUT_via_SSH_is_failed") st.report_pass("test_case_passed") @@ -212,9 +214,9 @@ def test_ft_rbac_ro_tacacs_cred_ssh(): Author: Sai Durga (pchvsai.durga@broadcom,com) FtOpSoScRBACFn008 Verify that non-admin tacacs user doesn?t have all permissions except show (get) commands when SSH to the system with username/password. 
''' - if not st.exec_ssh(vars.D1, data.ro_username, data.password, ['show vlan config']): + if not st.exec_ssh(vars.D1, data.ro_username, data.ro_password, ['show vlan config']): st.report_fail("cmd_not_executed") - if not st.exec_ssh(vars.D1, data.ro_username, data.password, ['sudo config vlan add 1000']): + if not st.exec_ssh(vars.D1, data.ro_username, data.ro_password, ['sudo config vlan add 1000']): st.report_fail("admin_user_root_privilege", "non", "got") st.report_pass("admin_user_root_privilege", "non", "doesnot got") @@ -235,7 +237,7 @@ def test_ft_tacacs_modify_server_parameters(): invalid_timeout = '10' invalid_ip_addr = '10.10.10.1' tacacs_params = st.get_service_info(vars.D1, "tacacs") - tacacs_obj.set_tacacs_server(vars.D1, 'delete', tacacs_params.hosts[1].ip) + tacacs_obj.set_tacacs_server(vars.D1, 'delete', tacacs_params.hosts[2].ip) tacacs_obj.set_tacacs_properties(vars.D1, 'passkey', 'secretstring') st.log("Configuring global tacacs server key with special characters") tacacs_obj.set_tacacs_properties(vars.D1, 'passkey', data.passkey) @@ -254,7 +256,8 @@ def test_ft_tacacs_modify_server_parameters(): data.auth_type, data.priority) st.wait(2, "sync the tacacs server after config changes") st.log("Trying to SSH to the device with TACACS+ server which is configured with the valid parameters") - if not ssh_obj.connect_to_device(data.ip_address, data.username, data.password, data.protocol, data.ssh_port): + if not poll_wait(ssh_obj.connect_to_device, 10, data.ip_address, data.username, + data.password, data.protocol, data.ssh_port): debug_info("test_ft_tacacs_modify_server_parameters", data.tacacs_ser_ip_1) st.report_fail("Login_to_DUT_via_SSH_is_failed") st.report_pass("test_case_passed") diff --git a/spytest/tests/switching/test_portchannel.py b/spytest/tests/switching/test_portchannel.py index 01afc13f65e..dc14868fd9d 100644 --- a/spytest/tests/switching/test_portchannel.py +++ b/spytest/tests/switching/test_portchannel.py @@ -900,7 +900,7 @@ def 
test_ft_lacp_graceful_restart_with_cold_boot(): st.report_fail('portchannel_member_state_failed') config_save(vars.D2) slog.clear_logging(vars.D2) - [output, exceptions] = exec_all(True, [ExecAllFunc(st.reboot, vars.D2), ExecAllFunc(poll_wait, portchannel_obj.verify_portchannel_details, 10, vars.D1, [data.portchannel_name, data.portchannel_name2], [data.lag_down, data.lag_down], [None, None], [[vars.D1D2P1, vars.D1D2P2], [vars.D1D2P3, vars.D1D2P4]])]) + [output, exceptions] = exec_all(True, [ExecAllFunc(st.reboot, vars.D2), ExecAllFunc(poll_wait, portchannel_obj.verify_portchannel_details, 60, vars.D1, [data.portchannel_name, data.portchannel_name2], [data.lag_down, data.lag_down], [None, None], [[vars.D1D2P1, vars.D1D2P2], [vars.D1D2P3, vars.D1D2P4]])]) ensure_no_exception(exceptions) if False in output: st.report_fail('portchannel_member_state_failed') diff --git a/spytest/tests/switching/test_vlan.py b/spytest/tests/switching/test_vlan.py index 929b70e3b4c..edc6ed3adfe 100644 --- a/spytest/tests/switching/test_vlan.py +++ b/spytest/tests/switching/test_vlan.py @@ -12,6 +12,7 @@ import apis.system.reboot as reboot import apis.common.wait as waitapi import apis.system.basic as basic_obj +import apis.system.snmp as snmp_obj import utilities.utils as utils from utilities.common import poll_wait @@ -41,6 +42,9 @@ def vlan_func_hooks(request): "test_ft_stormcontrol_fast_reboot", "test_ft_stormcontrol_warm_reboot"] if st.get_func_name(request) in bum_test_functions: platform_check() + if request.function.func_name == "test_ft_snmp_max_vlan_scale": + vlan.clear_vlan_configuration(st.get_dut_names(), thread=False, cli_type="click") + portchannel.clear_portchannel_configuration(st.get_dut_names(), thread=True) yield @@ -82,6 +86,11 @@ def vlan_variables(): sc_data.vlan_data = [{"dut": [vars.D1], "vlan_id": sc_data.vlan, "tagged": [vars.D1T1P1, vars.D1T1P2]}] hw_constants_DUT = st.get_datastore(vars.D1, "constants") sc_data.warm_reboot_supported_platforms = 
hw_constants_DUT['WARM_REBOOT_SUPPORTED_PLATFORMS'] + sc_data.oid_sysName = '1.3.6.1.2.1.1.5.0' + sc_data.ro_community = 'test_community' + sc_data.location = 'hyderabad' + sc_data.oid_dot1qBase = '1.3.6.1.2.1.17.7.1.1' + sc_data.mgmt_int = 'eth0' def vlan_module_prolog(): @@ -325,6 +334,7 @@ def test_ft_vlan_syslog_verify(): vars = st.ensure_min_topology("D1") sc_data.vlan_test = str(random_vlan_list(1, [int(sc_data.vlan)])[0]) result = 1 + slog.clear_logging(vars.D1) st.log("checking vlan count before vlan addition or deletion") count_before_add = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["addVlan"]) count_before_delete = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["removeVlan"]) @@ -333,9 +343,9 @@ def test_ft_vlan_syslog_verify(): vlan.create_vlan(vars.D1, sc_data.vlan_test) vlan.delete_vlan(vars.D1, sc_data.vlan_test) st.log("checking vlan count after adding vlan") - count_after_add = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["addVlan"]) + count_after_add = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["addVlan:"]) st.log("vlan count after adding vlan:{}".format(count_after_add)) - count_after_delete = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["removeVlan"]) + count_after_delete = slog.get_logging_count(vars.D1, severity="NOTICE", filter_list=["removeVlan:"]) st.log("vlan count after deleting vlan:{}".format(count_after_delete)) if not count_after_add > count_before_add: st.error("vlan log count increamented after adding vlan:{}".format(count_after_add)) @@ -781,3 +791,33 @@ def test_ft_vlan_save_config_warm_and_fast_reboot(): st.report_fail("traffic_transmission_failed", vars.T1D1P1) report_result(status, msg_id) + +@pytest.mark.snmp_hardening +def test_ft_snmp_max_vlan_scale(): + ''' + Author: Prasad Darnasi + verify The BRIDGE-MIB requirements functionality by scaling DUT with max Vlans + ''' + + vlan_module_config(config='yes') + + st.log("Checking VLAN 
config after reboot") + max_vlan_verify() + global ipaddress + ipaddress_list = basic_obj.get_ifconfig_inet(vars.D1, sc_data.mgmt_int) + st.log("Checking Ip address of the Device ") + if not ipaddress_list: + st.report_env_fail("ip_verification_fail") + ipaddress = ipaddress_list[0] + st.log("Device ip addresse - {}".format(ipaddress)) + snmp_obj.set_snmp_config(vars.D1, snmp_rocommunity=sc_data.ro_community, snmp_location=sc_data.location) + if not snmp_obj.poll_for_snmp(vars.D1, 30, 1, ipaddress=ipaddress, + oid=sc_data.oid_sysName, community_name=sc_data.ro_community): + st.log("Post SNMP config , snmp is not working") + st.report_fail("operation_failed") + basic_obj.get_top_info(vars.D1, proc_name='snmpd') + get_snmp_output = snmp_obj.poll_for_snmp_walk(vars.D1, 15, 1, ipaddress=ipaddress, oid=sc_data.oid_dot1qBase, + community_name=sc_data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") diff --git a/spytest/tests/system/test_interface.py b/spytest/tests/system/test_interface.py index 64d88a86158..52aa7f8ecda 100644 --- a/spytest/tests/system/test_interface.py +++ b/spytest/tests/system/test_interface.py @@ -32,7 +32,7 @@ def interface_module_hooks(request): st.log("Creating TG streams") intf_data.streams = {} - stream = intf_data.tg.tg_traffic_config(port_handle=intf_data.tg_ph_1, mode='create', + stream = intf_data.tg.tg_traffic_config(port_handle=intf_data.tg_ph_1, mode='create', port_handle2=intf_data.tg_ph_2, length_mode='fixed', rate_pps=100, frame_size=intf_data.mtu1, l2_encap='ethernet_ii_vlan', transmit_mode='single_burst', pkts_per_burst=100, vlan_id=intf_data.vlan_id, @@ -41,7 +41,7 @@ def interface_module_hooks(request): st.log('Stream output:{}'.format(stream)) intf_data.streams['mtu1'] = stream['stream_id'] - stream = intf_data.tg.tg_traffic_config(port_handle=intf_data.tg_ph_1, mode='create', + stream = intf_data.tg.tg_traffic_config(port_handle=intf_data.tg_ph_1, 
mode='create', port_handle2=intf_data.tg_ph_2, length_mode='fixed', rate_pps=100, frame_size=intf_data.mtu2, l2_encap='ethernet_ii_vlan', transmit_mode='single_burst', pkts_per_burst=100, vlan_id=intf_data.vlan_id, @@ -81,12 +81,12 @@ def initialize_variables(): intf_data.ip_address = '11.11.11.11' intf_data.ip_address1 = "11.11.11.9" intf_data.mask = "24" + intf_data.mtu = '2000' intf_data.mtu1 = '4096' intf_data.mtu2 = '9216' intf_data.source_mac = "00:00:02:00:00:01" intf_data.destination_mac = "00:00:01:00:00:01" intf_data.vlan_id = str(random_vlan_list()[0]) - intf_data.mtu = '2000' intf_data.mtu_default = intfapi.get_interface_property(vars.D1, vars.D1T1P1, 'mtu')[0] intf_data.wait_sec = 10 @@ -145,28 +145,23 @@ def test_ft_port_frame_fwd_diff_mtu(): st.log("Configuring MTU values for each interface") intfapi.interface_properties_set(vars.D1, [vars.D1T1P1, vars.D1T1P2], 'mtu', intf_data.mtu1) - intf_data.tg.tg_traffic_control(action='run', stream_handle=intf_data.streams['mtu1']) - st.wait(intf_data.wait_sec) - intf_data.tg.tg_traffic_control(action='stop', stream_handle=intf_data.streams['mtu1']) - st.log("Fetching TGen statistics") - stats_tg1 = tgapi.get_traffic_stats(intf_data.tg, mode="aggregate", port_handle=intf_data.tg_ph_1) - total_tx_tg1 = stats_tg1.tx.total_bytes - stats_tg2 = tgapi.get_traffic_stats(intf_data.tg, mode="aggregate", port_handle=intf_data.tg_ph_2) - total_rx_tg2 = stats_tg2.rx.total_bytes - percentage_98_total_tx_tg1 = (98 * int(total_tx_tg1)) / 100 - st.banner("Sent bytes: {} and Received bytes : {}".format(percentage_98_total_tx_tg1, total_rx_tg2)) - if int(percentage_98_total_tx_tg1) <= 0 and int(percentage_98_total_tx_tg1) <= int(total_rx_tg2): - st.report_fail("traffic_transmission_failed", vars.T1D1P1) - - intf_data.tg.tg_traffic_control(action='run', stream_handle=intf_data.streams['mtu2']) - st.wait(intf_data.wait_sec) - intf_data.tg.tg_traffic_control(action='stop', stream_handle=intf_data.streams['mtu2']) - + 
intf_data.tg.tg_traffic_control(action='run', stream_handle=[intf_data.streams['mtu1'], intf_data.streams['mtu2']]) + st.wait(2) + intf_data.tg.tg_traffic_control(action='stop', stream_handle=[intf_data.streams['mtu1'], intf_data.streams['mtu2']]) st.log("Fetching TGen statistics") - stats_tg2 = tgapi.get_traffic_stats(intf_data.tg, mode="aggregate", port_handle=intf_data.tg_ph_2) - total_rx_tg2 = stats_tg2.rx.total_packets - st.banner("Received packets : {}" .format(total_rx_tg2)) - if int(total_rx_tg2) >= 100: + traffic_details = { + '1': { + 'tx_ports': [vars.T1D1P1], + 'tx_obj': [intf_data.tg], + 'exp_ratio': [[1, 0]], + 'rx_ports': [vars.T1D1P2], + 'rx_obj': [intf_data.tg], + 'stream_list': [[intf_data.streams['mtu1'], intf_data.streams['mtu2']]], + }, + } + streamResult = tgapi.validate_tgen_traffic(traffic_details=traffic_details, mode='streamblock', + comp_type='packet_count') + if not streamResult: st.report_fail("traffic_transmission_failed", vars.T1D1P1) st.report_pass("test_case_passed") diff --git a/spytest/tests/system/test_lldp.py b/spytest/tests/system/test_lldp.py index efa927e3af6..0747a52ccd3 100644 --- a/spytest/tests/system/test_lldp.py +++ b/spytest/tests/system/test_lldp.py @@ -43,6 +43,7 @@ def global_vars(): data.oid_locmanaddrentry = '1.0.8802.1.1.2.1.3.8.1' data.oid_configmanaddrtable = '1.0.8802.1.1.2.1.1.7' data.oid_configmanaddrentry = '1.0.8802.1.1.2.1.1.7.1' + data.oid_lldp_rem_man_addr_table = '1.0.8802.1.1.2.1.4.2' data.filter = '-Oqv' def lldp_snmp_pre_config(): @@ -53,6 +54,7 @@ def lldp_snmp_pre_config(): global ipaddress global lldp_value_remote, lldp_value_gran global lldp_total_value + #st.exec_each([vars.D1, vars.D2], lldp_obj.lldp_config, status="rx-and-tx") data.ipaddress_d1 = basic_obj.get_ifconfig_inet(vars.D1, data.mgmt_int) data.ipaddress_d2 = basic_obj.get_ifconfig_inet(vars.D2, data.mgmt_int) if not data.ipaddress_d1: @@ -300,6 +302,22 @@ def test_ft_lldp_lldplocportdesc(): st.log(" LLDP value is passed ") 
st.report_pass("test_case_passed") +@pytest.mark.lldp_remote +@pytest.mark.community +def test_ft_lldp_rem_man_addr_table(): + """ + Author : Prasad Darnasi + Verify the syntax check of the object LLDPRemManAddrTable. + Reference Test Bed : D1 <---> D2 + """ + snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid= data.oid_lldp_rem_man_addr_table, + community_name=data.ro_community,filter=data.filter) + if not snmp_output: + st.report_fail(" No SNMP Entries are available") + + st.log(" LLDP value is passed ") + st.report_pass("test_case_passed") + @pytest.mark.lldp_non_default_config @pytest.mark.community diff --git a/spytest/tests/system/test_sflow.py b/spytest/tests/system/test_sflow.py new file mode 100644 index 00000000000..ef9ac64f162 --- /dev/null +++ b/spytest/tests/system/test_sflow.py @@ -0,0 +1,1134 @@ +# This file contains the list of sFlow tests +# Author: Vennapusa Sreenivasula Reddy (sreenivasula.reddy@broadcom.com) + +import pytest + +from spytest import st, tgapi, SpyTestDict + +import apis.routing.ip as ipfeature +from apis.routing.arp import show_arp +from apis.system.basic import get_ifconfig_ether,get_ifconfig,service_operations_by_systemctl,poll_for_system_status,verify_service_status +import apis.system.sflow as sflow +from apis.system.logging import show_logging +from apis.system.interface import clear_interface_counters,show_interface_counters_detailed, interface_status_show +from apis.switching.portchannel import add_del_portchannel_member,delete_all_portchannels,create_portchannel,\ + clear_portchannel_configuration, get_portchannel_list +from apis.system.reboot import config_save +from apis.system.gnmi import gnmi_get,gnmi_set + +from utilities.common import filter_and_select +from utilities.parallel import exec_all, exec_foreach +from utilities.utils import util_ip_addr_to_hexa_conv,util_int_to_hexa_conv,ensure_service_params + +data = SpyTestDict() + +def initialize_variables(): + data.clear() + data.dut_list = [vars.D1, 
vars.D2] + data.af_ipv4 = "ipv4" + data.af_ipv6 = "ipv6" + data.collector_name_1 = "collector_1" + data.collector_name_2 = "collector_2" + data.unreachable_sflow_collector_IP = "1.1.1.1" + data.shell_sonic = "sonic" + data.tg_mac1 = "00:00:00:00:00:0A" + data.tg_mac2 = "00:00:00:00:00:0B" + data.tg_mac3 = "00:00:00:00:00:0C" + data.shell_vtysh = "vtysh" + data.ip4_addr = ["192.168.1.1", "192.168.1.2", "192.168.2.1", "192.168.2.2", "192.168.3.1", "192.168.3.3", + "192.168.4.1", "192.168.4.4"] + data.ip6_addr = ["2001::1", "2001::2","2001::3"] + data.static_ip_rt_tg2 = "192.168.3.0/24" + data.static_ip_rt_tg1 = "192.168.1.0/24" + data.ip_protocol = "17" + data.members_dut2 = [vars.D2T1P2] + data.non_default_udp_port = "4451" + data.default_udp_port = "6343" + data.non_default_sampling_rate = 2000 + data.default="default" + data.non_default="non-default" + data.polling_interval="polling interval" + data.sampling_rate="sampling rate" + data.portchannel_name = "PortChannel7" + data.vrf_name = "Vrf_test_01" + data.address_type_ipv4 = "1" + data.address_type_ipv6 = "2" + data.sflow_flow_sample_version = "1" + data.sflow_counter_sample_version = "2" + data.invalid_interface = "Ethernet1001" + data.invalid_data_string = "BCMSONIC" + data.invalid_ip_address_1 = "0.0.0.0" + data.invalid_ip_address_2 = "239.1.1.1" + data.invalid_ip_address_3 = "255.255.255.255" + data.pps_1 = 100000 + data.pps_2 = 5000 + data.capture_support = True + if st.is_soft_tgen(): + data.capture_support = False + data.pps_2 = 200 + data.non_default_sampling_rate = 256 + data.agent_id = util_ip_addr_to_hexa_conv(data.ip4_addr[6]) + data.hex_address_type_v4 = util_int_to_hexa_conv(data.address_type_ipv4,z_fill=7) + data.hex_address_type_v6 = util_int_to_hexa_conv(data.address_type_ipv6,z_fill=7) + data.hex_flow_sample = util_int_to_hexa_conv(data.sflow_flow_sample_version,z_fill=7) + data.hex_counter_sample = util_int_to_hexa_conv(data.sflow_counter_sample_version,z_fill=7) + data.hex_sampling_rate = 
util_int_to_hexa_conv(data.non_default_sampling_rate,z_fill=7) + +def validate_packet_capture(**kwargs): + if not data.capture_support: return True + return tgapi.validate_packet_capture(**kwargs) + +def debug_dump(msg=""): + st.banner("SFLOW configuration {}".format(msg)) + sflow.show(vars.D2) + sflow.show_interface(vars.D2) + +@pytest.fixture(scope="module", autouse=True) +def sflow_module_hooks(request): + global vars + vars = st.ensure_min_topology("D1T1:1", "D2T1:2", "D1D2:1") + initialize_variables() + [output, _] = exec_all(True, [[interface_status_show, vars.D1, [vars.D1T1P1, vars.D1D2P1, vars.D1D2P2]], + [interface_status_show, vars.D2, [vars.D2T1P1, vars.D2D1P1, vars.D2D1P2]]]) + dut1_intf_counters, dut2_intf_counters = output + data.port_speed = {vars.D1:{vars.D1T1P1:None, vars.D1D2P1:None, vars.D1D2P2:None}, + vars.D2:{vars.D2T1P1:None, vars.D2D1P1:None, vars.D2D1P2:None}} + for port in [vars.D1T1P1, vars.D1D2P1, vars.D1D2P2]: + data.port_speed[vars.D1][port] = filter_and_select(dut1_intf_counters, ['speed'], {'interface': port})[0]['speed'].replace('G', '000') + for port in [vars.D2T1P1, vars.D2D1P1, vars.D2D1P2]: + data.port_speed[vars.D2][port] = filter_and_select(dut2_intf_counters, ['speed'], {'interface': port})[0]['speed'].replace('G', '000') + st.log("The port speed info is: {}".format(data.port_speed)) + data.dut_rt_int_mac1 = get_ifconfig_ether(vars.D1, vars.D1T1P1) + exec_all(True, [[tg_prolog], [sflow_module_prolog]], first_on_main=True) + yield + exec_all(True, [[tg_epilog], [sflow_module_epilog]], first_on_main=True) + + +@pytest.fixture(scope="function", autouse=True) +def sflow_func_hooks(request): + debug_dump("Function Prolog") + yield + debug_dump("Function Epilog") + if st.get_func_name(request) == "test_ft_sflow_sampling_v4_sFlow_collector": + sflow.config_attributes(vars.D2, sample_rate=data.port_speed[vars.D2][vars.D2D1P1], interface_name=vars.D2D1P1, no_form=True) + sflow.add_del_collector(vars.D2, 
collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.non_default_udp_port, action="del") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.default_udp_port, action="add") + if st.get_func_name(request) == "test_ft_sflow_sampling_v6_sFlow_collector": + module_config_retain() + + + +def config_routing_interfaces(dut, params): + if dut in params: + for data in params[dut]: + if not ipfeature.config_ip_addr_interface(dut, data["interface"], data["ip_address"], data["subnet"], + data["family"]): + if data["family"] == "ipv4": + st.report_fail("ip_routing_int_create_fail", data["interface"]) + elif data["family"] == "ipv6": + st.report_fail("ip6_routing_int_create_fail", data["interface"]) + + +def sflow_module_prolog(): + debug_dump("Module Prolog Start") + st.log("Routing configuration on all duts") + params = { + vars.D1: [{"ip_address": data.ip4_addr[1], "interface": vars.D1T1P1, "subnet": 24, "family": data.af_ipv4}, + {"ip_address": data.ip4_addr[2], "interface": vars.D1D2P1, "subnet": 24, "family": data.af_ipv4}], + vars.D2: [{"ip_address": data.ip4_addr[3], "interface": vars.D2D1P1, "subnet": 24, "family": data.af_ipv4}, + {"ip_address": data.ip4_addr[4], "interface": vars.D2T1P1, "subnet": 24, "family": data.af_ipv4}, + {"ip_address": data.ip4_addr[6], "interface": vars.D2T1P2, "subnet": 24, "family": data.af_ipv4}, + {"ip_address": data.ip6_addr[0], "interface": vars.D2T1P2, "subnet": 64, "family": data.af_ipv6}]} + exec_foreach(True, data.dut_list, config_routing_interfaces, params) + st.log("Adding static routes on D1 and D2") + exec_all(True, [[ipfeature.create_static_route, vars.D1, data.ip4_addr[3], data.static_ip_rt_tg2, data.shell_vtysh, data.af_ipv4], + [ipfeature.create_static_route, vars.D2, data.ip4_addr[2], data.static_ip_rt_tg1, data.shell_vtysh, data.af_ipv4]]) + st.log("sFlow MODULE configuration - INIT") + sflow.enable_disable_config(vars.D2, 
interface=False, interface_name=None, action="enable") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=None, action="add") + st.log("Configuring non reachable sFlow Collector") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, + ip_address=data.unreachable_sflow_collector_IP, + action="add") + sflow.add_del_agent_id(vars.D2, interface_name=vars.D2T1P2, action="add") + st.log("About to configure sFlow polling interval to non default value 0 means stopping sflow counter samples") + sflow.config_attributes(vars.D2, polling_interval="0") + debug_dump("Module Prolog End") + + +def sflow_module_epilog(): + debug_dump("Module Epilog Start") + sflow.enable_disable_config(vars.D2, interface=False, interface_name=None, action="disable") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1,ip_address=data.ip4_addr[7], + port_number=None, action="del") + sflow.add_del_agent_id(vars.D2, action="del") + st.log("sFlow MODULE configuration - CLEAN UP") + ipfeature.clear_ip_configuration(data.dut_list, 'all') + clear_portchannel_configuration([vars.D2]) + exec_all(True, [[ipfeature.delete_static_route, vars.D1,data.ip4_addr[3], data.static_ip_rt_tg2], + [ipfeature.delete_static_route, vars.D2,data.ip4_addr[2], data.static_ip_rt_tg1]]) + debug_dump("Module Epilog End") + + +def verify_sflow_config(): + st.log("About to verify sFlow configuration") + report_flag = 0 + filter_data = [{'collector_ip': data.unreachable_sflow_collector_IP, 'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2, 'port': data.default_udp_port}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + # st.report_tc_fail("FtOpSfFn006","failed_to_config_sflow") + report_flag = 1 + if report_flag: + st.report_tc_fail("FtOpSfFn006", "failed_to_config_sflow") + else: + 
st.report_tc_pass("FtOpSfFn006","sflow_test_case_passed") + report_flag = 0 + if not show_logging(vars.D2, filter_list="Starting hsflowd service", lines=None): + # st.report_tc_fail("FtOpSfFn020","failed_to_generate_sflow_log_in_syslog") + report_flag = 1 + if report_flag: + st.report_tc_fail("FtOpSfFn020","failed_to_generate_sflow_log_in_syslog") + else: + st.report_tc_pass("FtOpSfFn020", "sflow_test_case_passed") + + +def tg_prolog(): + global h1, h2, h3, tr1, tg, tg_ph_1, tg_ph_2, tg_ph_3, tg_handler + tg_handler = tgapi.get_handles(vars, [vars.T1D1P1, vars.T1D2P1, vars.T1D2P2]) + tg = tg_handler["tg"] + tg_ph_1 = tg_handler["tg_ph_1"] + tg_ph_2 = tg_handler["tg_ph_2"] + tg_ph_3 = tg_handler["tg_ph_3"] + tgapi.traffic_action_control(tg_handler, actions=["reset", "clear_stats"]) + st.log("Routing configuration on all TGs") + h1 = tg.tg_interface_config(port_handle=tg_ph_1, mode='config', intf_ip_addr=data.ip4_addr[0], + gateway=data.ip4_addr[1], src_mac_addr=data.tg_mac1, arp_send_req='1') + h2 = tg.tg_interface_config(port_handle=tg_ph_2, mode='config', intf_ip_addr=data.ip4_addr[5], + gateway=data.ip4_addr[4], src_mac_addr=data.tg_mac2, arp_send_req='1') + h3 = tg.tg_interface_config(port_handle=tg_ph_3, mode='config', intf_ip_addr=data.ip4_addr[7], + gateway=data.ip4_addr[6], src_mac_addr=data.tg_mac3, arp_send_req='1') + tg.tg_interface_config(port_handle=tg_ph_3, mode='config', ipv6_intf_addr=data.ip6_addr[1], + ipv6_prefix_length='64', ipv6_gateway=data.ip6_addr[0], arp_send_req='1') + st.log("stream configuration on all TG's") + tr1 = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='continuous', pkts_per_burst=data.pps_1, + ip_protocol=data.ip_protocol, length_mode='fixed', rate_pps=data.pps_1, l3_protocol='ipv4', mac_src=data.tg_mac1, + mac_dst=data.dut_rt_int_mac1, ip_src_addr=data.ip4_addr[0], ip_dst_addr=data.ip4_addr[5], l4_protocol='tcp', + high_speed_result_analysis=0) + +def tg_epilog(): + 
tg.tg_interface_config(port_handle=tg_ph_1, handle=h1['handle'], mode='destroy') + tg.tg_interface_config(port_handle=tg_ph_2, handle=h2['handle'], mode='destroy') + tg.tg_interface_config(port_handle=tg_ph_3, handle=h3['handle'], mode='destroy') + +def module_config_retain(): + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip6_addr[1], + port_number=None, action="del") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.default_udp_port, action="add") + sflow.config_attributes(vars.D2, polling_interval="0") + +def poch_config_remove(): + ipfeature.config_ip_addr_interface(vars.D2, interface_name=data.portchannel_name, ip_address=data.ip4_addr[6] + , subnet='24', family="ipv4", config='remove') + add_del_portchannel_member(vars.D2, portchannel=data.portchannel_name, + members=data.members_dut2, flag="del") + delete_all_portchannels(vars.D2) + ipfeature.config_ip_addr_interface(vars.D2, interface_name=vars.D2T1P2, ip_address=data.ip4_addr[6], + subnet="24", family="ipv4", config="add") + + ipfeature.config_ip_addr_interface(vars.D2, interface_name=vars.D2T1P2, ip_address=data.ip6_addr[0], subnet="64", + family="ipv6", config="add") + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_sampling_v4_sFlow_collector +def test_ft_sflow_sampling_v4_sFlow_collector(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn003 : Verify sFlow sampling functionality through Front end port on + physical interfaces using IPv4 connectivity to sFlow collector. 
+ Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + verify_sflow_config() + report_flag, module_flag = 0, 0 + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + + st.banner("About to check initial psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val1 = sflow_samples_init.get("psample_cb", -1) + sflow.hsflowd_status(vars.D2) + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + + st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[46, 50, 70], + value_list=[data.hex_address_type_v4, data.agent_id, data.hex_flow_sample]) + if not capture_result: + st.report_msg("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + report_flag, module_flag = 1, 1 + + st.banner("About to validate psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val2 = sflow_samples_init.get("psample_cb", -1) + if psample_cb_val1 == -1 or psample_cb_val2 == -1 or (psample_cb_val2 < psample_cb_val1): + st.log("PSAMPLE CB verification failed") + psample_sflow_stats = int(psample_cb_val2) - int(psample_cb_val1) + if psample_sflow_stats <= 3: + 
st.report_msg("failed_to_receive_sflow_packets", data.default, data.sampling_rate) + report_flag, module_flag = 1, 1 + + st.banner('About to validate sflow collector interface counters', width=100) + ing_count = show_interface_counters_detailed(vars.D2, vars.D2T1P2) + if ing_count: + st.log("ingress_sample_count_128_255 ={} ".format(ing_count[0]['pkt_tx_128_255_octets'])) + st.log("ingress_sample_count_256_511 ={} ".format(ing_count[0]['pkt_tx_256_511_octets'])) + st.log("ingress_sample_count_512_1023 ={} ".format(ing_count[0]['pkt_tx_512_1023_octets'])) + st.log("ingress_sample_count_1024_1518 ={} ".format(ing_count[0]['pkt_tx_1024_1518_octets'])) + if (int(str(ing_count[0]['pkt_tx_128_255_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_256_511_octets']).replace(',', '')) + + int(str(ing_count[0]['pkt_tx_512_1023_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_1024_1518_octets']).replace(',', ''))) < 4: + st.report_msg("failed_to_receive_sflow_packets", data.default, data.sampling_rate) + report_flag, module_flag = 1, 1 + if report_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_tc_fail("FtOpSfFn003","failed_to_receive_sflow_packets", data.default, data.sampling_rate) + st.report_tc_fail("FtOpSfFn010","failed_to_receive_sflow_packets", data.default, data.sampling_rate) + st.report_tc_fail("FtOpSfFn024","failed_to_receive_sflow_packets", data.default, data.sampling_rate) + else: + st.report_tc_pass("FtOpSfFn003", "sflow_test_case_passed") + st.report_tc_pass("FtOpSfFn010","sflow_test_case_passed") + st.report_tc_pass("FtOpSfFn024","sflow_test_case_passed") + + report_flag = 0 + st.banner('About to send udp data traffic with non default sampling rate and non default udp collector 
port', + width=100) + st.banner('About to Config sFlow sampling rate to non default value', width=100) + sflow.config_attributes(vars.D2, sample_rate=data.non_default_sampling_rate, interface_name=vars.D2D1P1) + # if not sflow.verify_interface(vars.D2, interface_name=vars.D2D1P1, sampling_rate=data.non_default_sampling_rate, + # cli_type=data.sflow_cli_type): + # st.report_fail("failed_to_config_non_default_sampling_rate") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + st.banner('About to check initial psample stats', width=100) + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val1 = sflow_samples_init.get("psample_cb", -1) + st.banner('About to config non default udp collector port',width = 100) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=None, action="del") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.non_default_udp_port, action="add") + sflow.hsflowd_status(vars.D2) + ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4") + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.banner('About to send udp data traffic', width=100) + tr2 = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create', transmit_mode='continuous', pkts_per_burst=data.pps_2, + ip_protocol=data.ip_protocol, length_mode='fixed', rate_pps=data.pps_2, l3_protocol='ipv4', mac_src=data.tg_mac1, + mac_dst=data.dut_rt_int_mac1, ip_src_addr=data.ip4_addr[0], ip_dst_addr=data.ip4_addr[5], l4_protocol='udp', + high_speed_result_analysis=0) + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr2['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr2['stream_id']) + st.wait(2) + st.log("Checking the data stats for traffic flow") + tg_1_stats = tgapi.get_traffic_stats(tg, mode='aggregate', 
port_handle=tg_handler["tg_ph_1"]) + tg_2_stats = tgapi.get_traffic_stats(tg, mode='aggregate', port_handle=tg_handler["tg_ph_2"]) + total_tx_tg1 = tg_1_stats.tx.total_packets + total_rx_tg2 = tg_2_stats.rx.total_packets + tx_tg1_95_precentage = (95 * int(total_tx_tg1)) / 100 + st.log("total_tx_tg1={}".format(total_tx_tg1)) + st.log("total_rx_tg2={}".format(total_rx_tg2)) + st.log("tx_tg1_95_precentage={}".format(tx_tg1_95_precentage)) + if not tx_tg1_95_precentage <= total_rx_tg2: + st.log("traffic_verification_failed") + + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + # st.banner('About to validate sflow packet received by sflow collector', width=100) + # pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + # capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + # offset_list=[46, 50, 70, 86], + # value_list=[data.hex_address_type_v4, data.agent_id, + # data.hex_flow_sample,data.hex_sampling_rate]) + # if not capture_result: + # st.report_msg("failed_to_receive_sflow_packets", data.non_default,data.sampling_rate) + # report_flag, module_flag = 1, 1 + st.banner('About to validate psample stats', width=100) + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val2 = sflow_samples_init.get("psample_cb", -1) + if psample_cb_val1 == -1 or psample_cb_val2 == -1 or (psample_cb_val2 < psample_cb_val1): + st.log("PSAMPLE CB verification failed") + psample_sflow_stats = int(psample_cb_val2) - int(psample_cb_val1) + if psample_sflow_stats <= 9: + st.report_msg("failed_to_receive_sflow_packets", data.non_default, data.sampling_rate) + report_flag, module_flag = 1, 1 + st.banner('About to validate sflow collector interface counters', width=100) + ing_count = show_interface_counters_detailed(vars.D2, vars.D2T1P2) + if ing_count: + st.log("ingress_sample_count_128_255 ={} ".format(ing_count[0]['pkt_tx_128_255_octets'])) + st.log("ingress_sample_count_256_511 ={} 
".format(ing_count[0]['pkt_tx_256_511_octets'])) + st.log("ingress_sample_count_512_1023 ={} ".format(ing_count[0]['pkt_tx_512_1023_octets'])) + st.log("ingress_sample_count_1024_1518 ={} ".format(ing_count[0]['pkt_tx_1024_1518_octets'])) + if (int(str(ing_count[0]['pkt_tx_128_255_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_256_511_octets']).replace(',', '')) + + int(str(ing_count[0]['pkt_tx_512_1023_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_1024_1518_octets']).replace(',', ''))) < 9: + st.report_msg("failed_to_receive_sflow_packets", data.non_default, data.sampling_rate) + report_flag, module_flag = 1, 1 + ## + if report_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_tc_fail("FtOpSfFn007","failed_to_receive_sflow_packets", data.non_default, data.sampling_rate) + st.report_tc_fail("FtOpSfFn022", "failed_to_receive_sflow_packets", data.non_default, data.sampling_rate) + else: + st.report_tc_pass("FtOpSfFn007","sflow_test_case_passed") + st.report_tc_pass("FtOpSfFn022", "sflow_test_case_passed") + + if module_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_fail("failed_to_receive_sflow_packets", data.non_default, data.sampling_rate) + else: + st.report_pass("sflow_test_case_passed") + + + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_sampling_v4_sFlow_collector +def test_ft_sflow_sampling_v6_sFlow_collector(): + """ + Author Details: + Name: Sreenivasula Reddy 
Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn004 : Verify sFlow sampling functionality through Front end port on physical interfaces + using IPV6 connectivity to sFlow collector. + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + |(2001::1/64) + | + |(2001::2 + |(192.168.4.4/24) + TG3(sFlow collector) + """ + report_flag, module_flag = 0, 0 + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.default_udp_port, action="del") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip6_addr[1], + port_number=None, action="add") + sflow.enable_disable_config(vars.D2, interface=True, interface_name=vars.D2D1P1, action="disable") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.hsflowd_status(vars.D2) + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.banner('About to validate sflow packet received by sflow collector') + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[50, 70], + value_list=[data.hex_address_type_v6, data.agent_id]) + if capture_result: + st.report_msg("sflow_negative_verifcation_failed") + report_flag, module_flag = 1, 1 + if report_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, 
[[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_tc_fail("FtOpSfFn008","sflow_negative_verifcation_failed") + else: + st.report_tc_pass("FtOpSfFn008","sflow_test_case_passed") + + report_flag = 0 + + #sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip6_addr[1], port_number=None, action="add") + sflow.enable_disable_config(vars.D2, interface=True, interface_name=vars.D2D1P1, action="enable") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + st.log("About to check initial psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val1 = sflow_samples_init.get("psample_cb", -1) + sflow.hsflowd_status(vars.D2) + ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4") + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[50, 70], + value_list=[data.hex_address_type_v6, data.agent_id]) + if not capture_result: + st.report_msg("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + report_flag, module_flag = 1, 1 + + st.log("About to validate psample stats") + sflow_samples_init = 
sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val2 = sflow_samples_init.get("psample_cb", -1) + if psample_cb_val1 == -1 or psample_cb_val2 == -1 or (psample_cb_val2 < psample_cb_val1): + st.log("PSAMPLE CB verification failed") + psample_sflow_stats = int(psample_cb_val2) - int(psample_cb_val1) + if psample_sflow_stats <= 3: + st.report_msg("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + report_flag, module_flag = 1, 1 + st.banner('About to validate sflow collector interface counters', width=100) + ing_count = show_interface_counters_detailed(vars.D2, vars.D2T1P2) + if ing_count: + st.log("ingress_sample_count_128_255 ={} ".format(ing_count[0]['pkt_tx_128_255_octets'])) + st.log("ingress_sample_count_256_511 ={} ".format(ing_count[0]['pkt_tx_256_511_octets'])) + if (int(str(ing_count[0]['pkt_tx_128_255_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_256_511_octets']).replace(',', '')) + + int(str(ing_count[0]['pkt_tx_512_1023_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_1024_1518_octets']).replace(',', ''))) < 4: + st.report_msg("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + report_flag, module_flag = 1, 1 + if report_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_tc_fail("FtOpSfFn004","failed_to_receive_sflow_packets", data.default, data.sampling_rate) + else: + st.report_tc_pass("FtOpSfFn004", "sflow_test_case_passed") + + if module_flag: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], 
[show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_fail("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + else: + st.report_pass("sflow_test_case_passed") + + + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_counter_samples_polling_interval +def test_ft_sflow_counter_samples_polling_interval(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn012 : Verify that counter samples are updated for non default polling interval + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + st.log("About to configure sFlow polling interval to non default value ") + sflow.config_attributes(vars.D2, polling_interval="30") + st.log("About to verify sFlow polling interval configuration") + filter_data = [{'collector_ip': data.unreachable_sflow_collector_IP, 'polling_interval': '30', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2, 'port': data.default_udp_port}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + module_config_retain() + st.report_fail("failed_to_config_sflow") + + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(32) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + # st.banner('About to validate counter sample sflow packet received by sflow collector') + # pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + # capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + # offset_list=[46, 50, 70], + # value_list=[data.hex_address_type_v4, + # 
data.agent_id, data.hex_counter_sample]) + # if not capture_result: + # st.report_fail("failed_to_receive_sflow_packets", data.default,data.polling_interval) + ing_count = show_interface_counters_detailed(vars.D2, vars.D2T1P2) + if ing_count: + st.log("ingress_sample_count_128_255 ={} ".format(ing_count[0]['pkt_tx_128_255_octets'])) + st.log("ingress_sample_count_256_511 ={} ".format(ing_count[0]['pkt_tx_256_511_octets'])) + st.log("ingress_sample_count_512_1023 ={} ".format(ing_count[0]['pkt_tx_512_1023_octets'])) + st.log("ingress_sample_count_1024_1518 ={} ".format(ing_count[0]['pkt_tx_1024_1518_octets'])) + if (int(str(ing_count[0]['pkt_tx_128_255_octets']).replace(',', '')) + int(str(ing_count[0]['pkt_tx_256_511_octets']).replace(',', '')) + + int(str(ing_count[0]['pkt_tx_512_1023_octets']).replace(',', ''))+int(str(ing_count[0]['pkt_tx_1024_1518_octets']).replace(',', ''))) <= 9: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + st.report_fail("failed_to_receive_sflow_packets", data.default, data.polling_interval) + st.report_pass("sflow_test_case_passed") + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_max_sflow_collector_config +def test_ft_sflow_max_sflow_collector_config(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn005 : Verify max sFlow collectors configuration with different + combinations (2 IPV4,2 IPV6,combination of both). 
+ Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + sflow.config_attributes(vars.D2, polling_interval="0") + sflow.hsflowd_status(vars.D2) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, ip_address=data.unreachable_sflow_collector_IP, + action="del") + sflow.hsflowd_status(vars.D2) + st.banner('About to add IPv4 and IPv6 max sflow collector combination') + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, ip_address=data.ip6_addr[1], + port_number=None, action="add") + filter_data = [{'collector_ip': data.ip6_addr[1], 'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2, 'port': data.default_udp_port}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=data.default_udp_port, action="del") + sflow.hsflowd_status(vars.D2) + st.banner('About to add max IPv6 sflow collectors') + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip6_addr[2], + port_number=None, action="add") + filter_data = [{'collector_ip': data.ip6_addr[1], 'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2, 'port': data.default_udp_port}, + {'port': data.default_udp_port, 'collector_ip': data.ip6_addr[2]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + + sflow.hsflowd_status(vars.D2) + st.banner('About to delete IPv4 sflow collectors') + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip6_addr[2], + 
port_number=None, action="del") + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, ip_address=data.ip6_addr[1], + port_number=None, action="del") + filter_data = [{'polling_interval': '0', + 'collectors_cnt': '0', 'state': 'up', 'agent_id': vars.D2T1P2}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + + sflow.hsflowd_status(vars.D2) + st.banner('About to add max IPv4 sflow collectors') + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.ip4_addr[7], + port_number=None, action="add") + st.wait(1) + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, + ip_address=data.unreachable_sflow_collector_IP, + action="add") + filter_data = [{'collector_ip': data.unreachable_sflow_collector_IP, 'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2, 'port': data.default_udp_port}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + + sflow.hsflowd_status(vars.D2) + st.report_pass("sflow_test_case_passed") + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_sampling_sFlow_collector_management_port +def test_ft_sflow_sampling_sFlow_collector_management_port(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn001 : Verify sFlow sampling functionality through management port connectivity to + sFlow collector. 
+ Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + sflow_collector_ip = ensure_service_params(vars.D1, "chef", "ip") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, + ip_address=data.unreachable_sflow_collector_IP, action="del") + sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, ip_address=sflow_collector_ip, + port_number=None, action="add") + st.poll_wait(ipfeature.ping, 3, vars.D2, sflow_collector_ip, family="ipv4") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + eth0_params = get_ifconfig(vars.D2) + init_eth0_param=eth0_params[0]["tx_packets"] + st.log("About to check initial psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val1 = sflow_samples_init.get("psample_cb", -1) + sflow.hsflowd_status(vars.D2) + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(12) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.log("Checking the data stats for traffic flow") + tg_1_stats = tgapi.get_traffic_stats(tg, mode='aggregate', port_handle=tg_handler["tg_ph_1"]) + tg_2_stats = tgapi.get_traffic_stats(tg, mode='aggregate', port_handle=tg_handler["tg_ph_2"]) + total_tx_tg1 = tg_1_stats.tx.total_packets + total_rx_tg2 = tg_2_stats.rx.total_packets + tx_tg1_95_precentage = (95 * int(total_tx_tg1)) / 100 + st.log("total_tx_tg1={}".format(total_tx_tg1)) + st.log("total_rx_tg2={}".format(total_rx_tg2)) + st.log("tx_tg1_95_precentage={}".format(tx_tg1_95_precentage)) + if not tx_tg1_95_precentage <= total_rx_tg2: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, 
[[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("traffic_verification_failed") + st.log("About to validate psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val2 = sflow_samples_init.get("psample_cb", -1) + if psample_cb_val1 == -1 or psample_cb_val2 == -1 or (psample_cb_val2 < psample_cb_val1): + st.log("PSAMPLE CB verification failed") + psample_sflow_stats = int(psample_cb_val2) - int(psample_cb_val1) + if psample_sflow_stats <= 3: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("failed_to_receive_sflow_packets", data.default,data.sampling_rate) + eth0_params = get_ifconfig(vars.D2) + fin_eth0_param = eth0_params[0]["tx_packets"] + if (int(fin_eth0_param)-int(init_eth0_param))<=3: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("failed_to_receive_sflow_packets", data.default, data.sampling_rate) + st.report_pass("sflow_test_case_passed") + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_collector_port_poch +def test_ft_sflow_collector_port_poch(): + """ + Author 
Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn016 : Verify sFlow functionality when sflow collector interface is + participated in PortChannel + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + ipfeature.delete_ip_interface(vars.D2, interface_name=vars.D2T1P2, ip_address=data.ip4_addr[6], + subnet="24", family="ipv4") + ipfeature.delete_ip_interface(vars.D2, interface_name=vars.D2T1P2, ip_address=data.ip6_addr[0], subnet="64", + family="ipv6") + create_portchannel(vars.D2, portchannel_list=data.portchannel_name, fallback=False, + min_link="", static=True) + add_del_portchannel_member(vars.D2, portchannel=data.portchannel_name, + members=data.members_dut2, flag="add") + ipfeature.config_ip_addr_interface(vars.D2, interface_name=data.portchannel_name, ip_address=data.ip4_addr[6] + , subnet='24', family="ipv4", config='add') + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + sflow.get_psample_list_groups(vars.D2) + collector_data = [{'collector_ip': data.ip4_addr[7], 'collector_name': data.collector_name_1}] + if not st.poll_wait(sflow.verify_config, 30, vars.D2, data=collector_data): + poch_config_remove() + st.report_fail("failed_to_config_sflow") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.hsflowd_status(vars.D2) + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(12) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + 
st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[46, 50, 70], + value_list=[data.hex_address_type_v4, data.agent_id, data.hex_flow_sample]) + if not capture_result: + get_portchannel_list(vars.D2) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.get_psample_list_groups(vars.D2) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + poch_config_remove() + st.report_fail("failed_to_receive_sflow_packets", data.default, data.sampling_rate) + poch_config_remove() + st.report_pass("sflow_test_case_passed") + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_samples_control_plane_traffic +def test_ft_sflow_samples_control_plane_traffic(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn023 : Verify sFlow functionality for control Plane traffic. 
+ Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + st.log("About to check initial psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val1 = sflow_samples_init.get("psample_cb", -1) + st.banner('About to Config sFlow sampling rate to non default value', width=100) + sflow.config_attributes(vars.D2, sample_rate="256", interface_name=vars.D2D1P1) + sflow.hsflowd_status(vars.D2) + ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4") + st.log("###### Ping iteration started ########") + for Iter in range(5): + st.log("Iteration count is : {}" .format(Iter)) + ipfeature.ping(vars.D1, data.ip4_addr[5], family="ipv4",count="100") + #ipfeature.ping(vars.D1, data.ip4_addr[5], family="ipv4",count="1000") + st.log("About to validate psample stats") + sflow_samples_init = sflow.psample_stats(vars.D2, ['psample_cb']) + psample_cb_val2 = sflow_samples_init.get("psample_cb", -1) + if psample_cb_val1 == -1 or psample_cb_val2 == -1 or (psample_cb_val2 < psample_cb_val1): + st.log("PSAMPLE CB verification failed") + psample_sflow_stats = int(psample_cb_val2) - int(psample_cb_val1) + if psample_sflow_stats < 1: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + sflow.config_attributes(vars.D2, sample_rate=data.port_speed[vars.D2][vars.D2D1P1], interface_name=vars.D2D1P1, no_form=True) + st.report_fail("sflow_control_plane_traffic_status", 
"failed") + sflow.config_attributes(vars.D2, sample_rate=data.port_speed[vars.D2][vars.D2D1P1], interface_name=vars.D2D1P1, no_form=True) + st.report_pass("sflow_control_plane_traffic_status","succeeded") + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_func_docker_restart +def test_ft_sflow_docker_restart(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn015 : Verify that sFlow functionality after sflow docker restart + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + st.log("Checking the sflow functionality with docker restart") + service_name = "sflow" + service_operations_by_systemctl(vars.D2, service_name, 'restart') + if not poll_for_system_status(vars.D2, service_name, 15, 1): + st.report_fail("service_not_running", service_name) + if not verify_service_status(vars.D2, service_name): + st.report_fail("lldp_service_not_up") + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.hsflowd_status(vars.D2) + if not ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4"): + show_arp(vars.D2) + ipfeature.show_ip_route(vars.D2) + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, 
+ offset_list=[46, 50, 70], + value_list=[data.hex_address_type_v4, data.agent_id, data.hex_flow_sample]) + if capture_result: + st.report_pass("sflow_docker_restart_status","succeeded") + else: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("sflow_docker_restart_status", "failed") + + +@pytest.mark.test_ft_sflow_save_reboot +def test_ft_system_config_mgmt_verifying_config_with_save_reboot_sflow(): + st.log("performing Config save") + config_save(vars.D2) + st.log("performing Reboot") + st.reboot(vars.D2) + st.log("verifying sFlow after save and reboot") + filter_data = [{'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + st.log("configuration successfully stored to config_db file after save and reboot") + st.report_pass("test_case_passed") + + +@pytest.mark.test_ft_sflow +@pytest.mark.test_ft_sflow_fast_reboot +def test_ft_sflow_fast_reboot(): + """ + Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn018 : Verify that sFlow functionality after fast reboot + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + """ + st.log("Performing Config save") + config_save(vars.D2) + st.log("Performing fast Reboot") + st.reboot(vars.D2, "fast") + st.log("Verifying sflow configuration post reboot") + st.log("About to 
verify sFlow configuration") + filter_data = [{'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + st.banner("After reboot and hsflowd is UP, it takes about a min to start sending samples,as the refresh cycle is 60 second") + st.wait(40) + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.hsflowd_status(vars.D2) + ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4") + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[46, 50, 70], + value_list=[data.hex_address_type_v4, data.agent_id, data.hex_flow_sample]) + if capture_result: + st.report_pass("sflow_fast_reboot_status","succeeded") + else: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("sflow_fast_reboot_status", "failed") + + +@pytest.mark.test_ft_sflow_warm_reboot +def test_ft_sflow_warm_reboot(): + """ + 
Author Details: + Name: Sreenivasula Reddy Vennapusa + Email: sreenivasula.reddy@broadcom.com + Objective - FtOpSfFn019 : Verify that sFlow functionality after warm reboot + Test bed details: + TG1(192.168.1.1)---(192.168.1.2)Partner(192.168.2.1/24)---(192.168.2.2/24)DUT(192.168.3.1)---(192.168.3.3/24)TG2 + |(192.168.4.1/24) + | + | + |(192.168.4.4/24) + TG3(sFlow collector) + After warm reboot and after hsflowd is UP, it takes about a min to start sending samples. + this is the expected behavior as the refresh cycle is 60 second + """ + st.log("Performing Config save") + config_save(vars.D2) + st.log("Performing warm Reboot") + st.reboot(vars.D2, "warm") + st.log("Verifying sflow configuration post reboot") + st.log("About to verify sFlow configuration") + filter_data = [{'polling_interval': '0', + 'collectors_cnt': '2', 'state': 'up', 'agent_id': vars.D2T1P2}, + {'port': data.default_udp_port, 'collector_ip': data.ip4_addr[7]}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("failed_to_config_sflow") + st.banner("After reboot and hsflowd is UP, it takes about a min to start sending samples,as the refresh cycle is 60 second") + st.wait(40) + exec_all(True, [[clear_interface_counters, vars.D1],[clear_interface_counters, vars.D2]]) + sflow.psample_stats(vars.D2, ['psample_cb']) + sflow.hsflowd_status(vars.D2) + ipfeature.ping_poll(vars.D2, data.ip4_addr[7], family="ipv4", iter=3, count="4") + tg.tg_packet_control(port_handle=tg_ph_3, action='start') + st.wait(2) + tg.tg_traffic_control(action='run', handle=tr1['stream_id']) + st.wait(10) + tg.tg_traffic_control(action='stop', handle=tr1['stream_id']) + st.wait(2) + st.log("stop the capture") + tg.tg_packet_control(port_handle=tg_ph_3, action='stop') + st.wait(1) + st.banner('About to validate sflow packet received by sflow collector', width=100) + pkts_captured = tg.tg_packet_stats(port_handle=tg_ph_3, format='var', output_type='hex') + capture_result = 
validate_packet_capture(tg_type=tg.tg_type, pkt_dict=pkts_captured, + offset_list=[46, 50, 70], + value_list=[data.hex_address_type_v4, data.agent_id, data.hex_flow_sample]) + if capture_result: + st.report_pass("sflow_warm_reboot_status","succeeded") + else: + sflow.psample_stats(vars.D2, ['psample_cb']) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1T1P1], [show_interface_counters_detailed, vars.D2, vars.D2T1P1]]) + exec_all(True, [[show_interface_counters_detailed, vars.D1, vars.D1D2P1], [show_interface_counters_detailed, vars.D2, vars.D2D1P1]]) + show_interface_counters_detailed(vars.D2, vars.D2T1P2) + st.report_fail("sflow_warm_reboot_status", "failed") + + +@pytest.mark.test_ft_sflow_invalid_config +def test_ft_sflow_invalid_config(): + st.log("About to config invalid interface and invalid string as sflow agent-id") + out = sflow.add_del_agent_id(vars.D1, interface_name=data.invalid_interface, action="add", skip_error_check = True) + cli_type = st.get_ui_type(vars.D1, cli_type='') + if cli_type == 'klish': + if not "%Error" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "agent-id") + elif cli_type == 'click': + if not "Invalid interface name" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "agent-id") + + out = sflow.add_del_agent_id(vars.D1, interface_name=data.invalid_data_string, action="add" , skip_error_check = True) + if cli_type == 'klish': + if not "Error" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "agent-id") + elif cli_type == 'click': + if not "Invalid interface name" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "agent-id") + + + st.log("About to config invalid IPv4 address as sflow collecteor") + out = sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, 
ip_address=data.invalid_ip_address_1, + port_number=None, action="add", skip_error_check = True) + if cli_type == 'klish': + if not "%Error" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + elif cli_type == 'click': + if not "Invalid" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + + out = sflow.add_del_collector(vars.D2, collector_name=data.collector_name_2, ip_address=data.invalid_ip_address_2, + port_number=None, action="add", skip_error_check = True) + if cli_type == 'klish': + if not "%Error" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + elif cli_type == 'click': + if not "Invalid" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + + out = sflow.add_del_collector(vars.D2, collector_name=data.collector_name_1, ip_address=data.invalid_ip_address_3, + port_number=None, action="add", skip_error_check = True) + if cli_type == 'klish': + if not "%Error" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + elif cli_type == 'click': + if not "Invalid" in out: + st.error("The output is: {}".format(out)) + st.report_fail("sflow_invalid_config_status", "not", "collector address") + st.report_pass("sflow_invalid_config_status","successfully","parameters") + + +@pytest.mark.test_ft_sflow_rest +def test_ft_sflow_rest_conf(): + st.log("sFlow configuration through REST") + try: + sflow.enable_disable_config(vars.D2, interface=False, interface_name=None, action="disable", + cli_type="rest") + st.log("About to configure sFlow polling interval to non default value ") + sflow.config_attributes(vars.D2, polling_interval="90", cli_type="rest") + st.log("About to verify sFlow 
configuration") + filter_data = [{'polling_interval': '90','state': 'down'}] + if not sflow.verify_config(vars.D2, data=filter_data): + st.report_fail("sflow_REST_config_status", "failed") + st.report_pass("sflow_REST_config_status", "succeeded") + except Exception as e: + st.log(e) + st.report_fail("exception_observed", e) + + +@pytest.mark.test_ft_sflow_gnmi +def test_ft_sflow_gnmi(): + data.get_get_xpath = "/sonic-sflow:sonic-sflow/SFLOW/SFLOW_LIST[sflow_key=global]/admin_state" + data.get_set_xpath = "/sonic-sflow:sonic-sflow/SFLOW/SFLOW_LIST" + data.json_data = {"sonic-sflow:SFLOW_LIST": [{"sflow_key": "global", "admin_state": "up"}]} + gnmi_set_output = gnmi_set(vars.D1, xpath=data.get_set_xpath, json_content=data.json_data) + st.log(gnmi_set_output) + if not gnmi_set_output: + st.report_fail('sflow_gNMI_config_status', "config","set","failed") + if not "op: UPDATE" in str(gnmi_set_output): + st.report_fail('sflow_gNMI_config_status', "config","set","failed") + gnmi_get_output = gnmi_get(vars.D1, xpath = data.get_get_xpath) + st.log(gnmi_get_output) + if not gnmi_get_output: + st.report_fail('sflow_gNMI_config_status', "status","get","failed") + if "sonic-sflow:admin_state" not in str(gnmi_get_output): + st.report_fail('sflow_gNMI_config_status', "status","get","failed") + st.report_pass('sflow_gNMI_config_status', "operation","get and set","successful") + diff --git a/spytest/tests/system/test_snapshot.py b/spytest/tests/system/test_snapshot.py new file mode 100644 index 00000000000..64d3318381a --- /dev/null +++ b/spytest/tests/system/test_snapshot.py @@ -0,0 +1,787 @@ +# Snapshot Feature FT test cases. 
+# Author : Phani kumar ravula (phanikumar.ravula@broadcom.com) and prudviraj k (prudviraj.kristipati@broadcom.com) + +import pytest +from datetime import datetime + +from spytest import st, tgapi, SpyTestDict +from spytest.utils import random_vlan_list + +import apis.system.snapshot as sfapi +from apis.switching.vlan import create_vlan,clear_vlan_configuration,add_vlan_member,delete_vlan_member,delete_vlan +from apis.system.basic import get_hwsku,get_ifconfig_ether,get_platform_summary +from apis.system.port import get_interface_counters_all,clear_interface_counters +import apis.system.reboot as reboot_api +import apis.qos.cos as cos_api +import apis.system.sflow as sflow + +sf_data = SpyTestDict() + +@pytest.fixture(scope="module", autouse=True) +def snapshot_feature_module_hooks(request): + global vars + vars = st.ensure_min_topology('D1T1:4') + global_vars_and_constants_init() + sf_module_prolog() + yield + sf_module_epilog() + +@pytest.fixture(scope="function", autouse=True) +def snapshot_feature_func_hooks(request): + if (st.get_func_name(request) not in 'test_ft_watermark_telemetry_interval') and (st.get_func_name(request) not in 'test_ft_snapshot_interval'): + clear_interface_counters(vars.D1) + yield + if st.get_func_name(request) == 'test_ft_sf_all_buffer_stats_using_unicast_traffic' or st.get_func_name(request) == 'test_ft_sf_verify_buffer_pool_counters': + sf_tg_traffic_start_stop(sf_data.unicast, False) + sfapi.config_snapshot_interval(vars.D1, snap="clear_snaphot_interval") + if st.get_func_name(request) == 'test_ft_sf_all_buffer_stats_using_multicast_traffic': + sf_tg_traffic_start_stop(sf_data.multicast, False) + sfapi.config_snapshot_interval(vars.D1, snap="clear_snaphot_interval") + if st.get_func_name(request) == 'test_ft_sf_verify_cpu_counters': + sflow.config_attributes(vars.D1, sample_rate=sf_data.sflow_sample_rate, interface_name=vars.D1T1P1, no_form=True) + sflow.enable_disable_config(vars.D1, interface=False, interface_name=None, 
action="disable") + sf_tg_traffic_start_stop(sf_data.unicast, False) + sfapi.config_snapshot_interval(vars.D1, snap="clear_snaphot_interval") + + +def global_vars_and_constants_init(): + sf_data.clear() + sf_data.tg_port_list = [vars.T1D1P1, vars.T1D1P2, vars.T1D1P3, vars.T1D1P4] + sf_data.port_list = [vars.D1T1P1, vars.D1T1P2, vars.D1T1P3, vars.D1T1P4] + sf_data.default_snapshot_interval = 10 + sf_data.snapshot_interval = 5 + sf_data.telemetry_interval = 30 + sf_data.default_telemetry_interval = 120 + sf_data.unicast = 'unicast' + sf_data.multicast = 'multicast' + sf_data.cpu = 'cpu' + sf_data.periodic = 'periodic' + sf_data.dut_mac = get_ifconfig_ether(vars.D1, 'eth0') + sf_data.tg_current_mode = sf_data.unicast + sf_data.traffic_duration = 5 + sf_data.initial_counter_value = 0 + sf_data.PG=['shared','headroom','unicast','multicast'] + sf_data.group = ['priority-group', 'queue'] + sf_data.table = ['watermark', 'persistent-watermark'] + sf_data.platform = get_hwsku(vars.D1).lower() + sf_data.config_file = "buffers.json" + sf_data.device_j2_file = "buffers.json.j2" + sf_data.vlan1 = 1 + sf_data.reload_interval = 70 + sf_data.FMT = '%H:%M:%S' + sf_data.buffer_pool_tolerance=2080 + sf_data.percentage = ['--percentage', '-p'] + sf_data.dot1p_to_tc_map_dict = {'0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7'} + sf_data.tc_to_pg_map_dict = {'0-7': '7'} + sf_data.obj_name = ['dot1p_tc_map', 'tc_pg_map'] + sf_data.map_name = ['dot1p_to_tc_map', 'tc_to_pg_map'] + sf_data.dot1p_tc_bind_map = {'port': vars.D1T1P1, 'map': sf_data.map_name[0], 'obj_name': sf_data.obj_name[0]} + sf_data.tc_pg_bind_map = {'port': vars.D1T1P1, 'map': sf_data.map_name[1], 'obj_name': sf_data.obj_name[1]} + sf_data.sflow_sample_rate = 256 + return sf_data + +def sf_module_prolog(): + clear_vlan_configuration(vars.D1) + sf_data.vlan = str(random_vlan_list()[0]) + create_vlan(vars.D1, sf_data.vlan) + add_vlan_member(vars.D1, sf_data.vlan, port_list=sf_data.port_list, 
tagging_mode=True) + sf_data.tg, sf_data.tg_ph_list, sf_data.stream_sf_data = sf_tg_stream_config() + +def sf_module_epilog(): + delete_vlan_member(vars.D1, sf_data.vlan, port_list=sf_data.port_list, tagging_mode=True) + delete_vlan(vars.D1, sf_data.vlan) + +def sf_tg_stream_config(): + global tg_handler + st.log('TG configuration for snapshot tests') + tg_handler = tgapi.get_handles(vars, sf_data.tg_port_list) + tg = tg_handler["tg"] + tg_ph_1 = tg_handler["tg_ph_1"] + tg_ph_2 = tg_handler["tg_ph_2"] + tg_ph_3 = tg_handler["tg_ph_3"] + tg_ph_4 = tg_handler["tg_ph_4"] + tg_ph_list = [tg_ph_1, tg_ph_2, tg_ph_3, tg_ph_4] + + stream_sf_data = {} + + tgapi.traffic_action_control(tg_handler, actions=["reset", "clear_stats"]) + + stream_sf_data['1'] = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:01", + mac_dst="00:00:00:00:00:05", vlan_id=sf_data.vlan,high_speed_result_analysis= 1, + l2_encap='ethernet_ii_vlan')['stream_id'] + stream_sf_data['2'] = tg.tg_traffic_config(port_handle=tg_ph_2, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:02", + mac_dst="00:00:00:00:00:05", vlan_id=sf_data.vlan,high_speed_result_analysis= 1, + l2_encap='ethernet_ii_vlan')['stream_id'] + stream_sf_data['3'] = tg.tg_traffic_config(port_handle=tg_ph_3, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:03", + mac_dst="00:00:00:00:00:05", vlan_id=sf_data.vlan,high_speed_result_analysis= 1, + l2_encap='ethernet_ii_vlan')['stream_id'] + stream_sf_data['4'] = tg.tg_traffic_config(port_handle=tg_ph_4, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:05", + mac_dst="00:00:00:00:00:01", vlan_id=sf_data.vlan,high_speed_result_analysis= 1, + l2_encap='ethernet_ii_vlan')['stream_id'] + + return tg, tg_ph_list, stream_sf_data + +def sf_tg_traffic_start_stop(traffic_mode, traffic_action): + st.log(">>> 
Configuring '{}' traffic streams".format(traffic_mode)) + st.debug("TG Streams : Current Mode = {}, Requested Mode = {}".format(sf_data.tg_current_mode, traffic_mode)) + if not sf_data.tg_current_mode == traffic_mode: + sf_data.tg_current_mode = traffic_mode + if traffic_mode == sf_data.multicast: + for each in sf_data.stream_sf_data: + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data[each], + mac_dst="01:82:33:33:33:33") + + elif traffic_mode == sf_data.cpu: + for each in sf_data.stream_sf_data: + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data[each], + mac_dst=sf_data.dut_mac,vlan_id=sf_data.vlan1) + elif traffic_mode == sf_data.periodic: + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['1'],mac_dst="00:00:00:00:00:05",rate_percent=40) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['2'],mac_dst="00:00:00:00:00:05",rate_percent=40) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['3'], + mac_dst="00:00:00:00:00:05",rate_percent=40) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['4'], + mac_dst="00:00:00:00:00:01",rate_percent=40) + else: + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['1'], + mac_dst="00:00:00:00:00:05",rate_percent=100) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['2'], + mac_dst="00:00:00:00:00:05",rate_percent=100) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['3'], + mac_dst="00:00:00:00:00:05",rate_percent=100) + sf_data.tg.tg_traffic_config(mode='modify', stream_id=sf_data.stream_sf_data['4'], + mac_dst="00:00:00:00:00:01",rate_percent=100) + + if traffic_action is True: + sf_data.tg.tg_traffic_control(action='run', stream_handle=sf_data.stream_sf_data.values()) + else: + sf_data.tg.tg_traffic_control(action='stop', stream_handle=sf_data.stream_sf_data.values()) + 
st.wait(1) + +def sf_collecting_debug_logs_when_test_fails(): + get_interface_counters_all(vars.D1) + +def clear_qos_map_config(): + cos_api.clear_port_qos_map_all(vars.D1, sf_data.dot1p_tc_bind_map) + cos_api.clear_qos_map_table(vars.D1, sf_data.dot1p_tc_bind_map) + cos_api.clear_port_qos_map_all(vars.D1, sf_data.tc_pg_bind_map) + cos_api.clear_qos_map_table(vars.D1, sf_data.tc_pg_bind_map) + + +@pytest.mark.snapshot_regression +def test_ft_watermark_telemetry_interval(): + """ + Author : Phani kumar R (phanikumar.ravula@broadcom.com) + """ + result = 0 + st.banner('ft_sf_wm_telemetry_interval') + if not sfapi.config_snapshot_interval(vars.D1, snap="telemetry", interval_val=sf_data.telemetry_interval): + st.error("Failed to configure watermark telemetry interval") + result += 1 + match = [{'telemetryinterval': sf_data.telemetry_interval}] + if not sfapi.verify(vars.D1,'telemetry_interval', verify_list=match): + st.error("Failed to verify the configured watermark telemetry interval value") + result += 1 + if not sfapi.config_snapshot_interval(vars.D1, snap="telemetry", interval_val=sf_data.default_telemetry_interval): + st.error("Failed to configure default watermark telemetry interval") + result += 1 + match = [{'telemetryinterval': sf_data.default_telemetry_interval}] + if not sfapi.verify(vars.D1,'telemetry_interval', verify_list=match): + st.error("Failed to verify the default watermark telemetry interval value") + result += 1 + + if not result: + st.report_pass("snapshot_telemetry_interval_config_and_reset", "successful") + else: + st.report_fail("snapshot_telemetry_interval_config_and_reset", "failed") + +@pytest.mark.snapshot_regression +def test_ft_snapshot_interval(): + """ + Author : Phani kumar R (phanikumar.ravula@broadcom.com) + """ + st.banner('ft_sf_snapshot_interval, ft_sf_verify_default_snapshot_interval') + + result1=result2=0 + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + 
st.error("Failed to configure snapshot interval") + result1 += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result1 += 1 + st.report_tc_fail("ft_sf_snapshot_interval", "snapshot_interval_config", "failed") + else: + st.report_tc_pass("ft_sf_snapshot_interval", "snapshot_interval_config", "successful") + + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snaphot_interval"): + st.error("Failed to clear the snapshot interval") + result2 += 1 + match = [{'snapshotinterval': sf_data.default_snapshot_interval}] + if not sfapi.verify(vars.D1, 'snapshot_interval', verify_list=match): + st.error("Failed to reset the snapshot interval to default value after clear") + result2 += 1 + st.report_tc_fail("ft_sf_verify_default_snapshot_interval", "snapshot_verify_default_interval", "failed") + else: + st.report_tc_pass("ft_sf_verify_default_snapshot_interval", "snapshot_verify_default_interval", "successful") + if not (result1 or result2): + st.report_pass("snapshot_interval_config_and_reset", "successful") + else: + st.report_fail("snapshot_interval_config_and_reset", "failed") + +@pytest.mark.snapshot_regression +def test_ft_sf_all_buffer_stats_using_unicast_traffic(): + """ + Author : prudviraj k (prudviraj.kristipati@broadcom.com) and phani kumar ravula(phanikumar.ravula@broadcom.com) + """ + result = 0 + per_result = 0 + clr_result = 0 + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + st.error("Failed to configure snapshot interval") + result += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result += 1 + st.log("configuring the QOS maps") + if not cos_api.config_dot1p_to_tc_map(vars.D1, sf_data.obj_name[0], 
sf_data.dot1p_to_tc_map_dict): + st.error("Failed to configure qos map of type dot1p to tc") + if not cos_api.config_tc_to_pg_map(vars.D1, sf_data.obj_name[1], sf_data.tc_to_pg_map_dict): + st.error("Failed to configure qos map of type tc to pg") + if not cos_api.verify_qos_map_table(vars.D1, 'dot1p_to_tc_map', sf_data.obj_name[0], + {'0': '0', '1': '1', '2': '2', '3': '3', '4': '4', '5': '5', '6': '6', '7': '7'}): + st.error("Failed to verify configured dot1p to tc map values") + result += 1 + + if not cos_api.verify_qos_map_table(vars.D1, 'tc_to_pg_map', sf_data.obj_name[1], + {'0': '7', '1': '7', '2': '7', '3': '7', '4': '7', '5': '7', '6': '7', '7': '7'}): + st.error("Failed to verify configured tc to pg map values") + result += 1 + + if not cos_api.config_port_qos_map_all(vars.D1, sf_data.dot1p_tc_bind_map): + st.error("Failed to bind the configured qos map of type dot1p to tc on interface") + if not cos_api.config_port_qos_map_all(vars.D1, sf_data.tc_pg_bind_map): + st.error("Failed to bind the configured qos map of type tc to pg on interface") + + sf_tg_traffic_start_stop(sf_data.unicast, True) + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### PG_shared_for_user_watermark####') + st.banner('TC name :::: ft_sf_pg_shared_using_uwm ::::') + match = [{'pg7': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, 'user_watermark_PG_shared', verify_list=match, port_alias=vars.D1T1P1): + st.error("Failed to verify the user_watermark_PG_shared counter value") + result += 1 + st.report_tc_fail("ft_sf_pg_shared_using_uwm", "snapshot_tc_verify", "PG_shared_for_user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_pg_shared_using_uwm", "snapshot_tc_verify", "PG_shared_for_user_watermark", "successful") + + st.banner('####verification_of_PG_shared_using_counter_DB####') + st.banner('TC name :::: ft_sf_pg_shared_using_Counter_DB ::::') + match = [{'SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES': sf_data.initial_counter_value}] + if 
sfapi.verify(vars.D1, column_name="COUNTERS_PG_NAME_MAP",interface_name= vars.D1T1P1,queue_value=7,table_name="COUNTERS",verify_list=match): + st.error("Failed to verify the user_watermark_PG_shared counter DB value") + result += 1 + st.report_tc_fail("ft_sf_pg_shared_using_Counter_DB", "snapshot_tc_counter_DB_verify", "PG_shared", "failed") + else: + st.report_tc_pass("ft_sf_pg_shared_using_Counter_DB", "snapshot_tc_counter_DB_verify", "PG_shared", "successful") + + st.banner('TC name:::: ft_sf_queue_unicast_using_uwm ::::') + match = [{'uc0': sf_data.initial_counter_value}] + + + st.banner('#### queue_unicast_for_user_watermark using percentage values####') + if sfapi.verify(vars.D1, 'queue_user_watermark_unicast', verify_list=match, port_alias=vars.D1T1P4, percentage=sf_data.percentage[0]): + st.error("Failed to verify the queue_user_watermark_unicast counter value using percentage") + result += 1 + per_result += 1 + + st.banner('#### queue_unicast_for_user_watermark using CLI####') + if sfapi.verify(vars.D1, 'queue_user_watermark_unicast', verify_list=match, port_alias=vars.D1T1P4): + st.error("Failed to verify the queue_user_watermark_unicast counter value") + result += 1 + per_result += 1 + + if per_result: + st.report_tc_fail("ft_sf_queue_unicast_using_uwm", "snapshot_tc_verify", "queue_unicast_for_user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_unicast_using_uwm", "snapshot_tc_verify", "queue_unicast_for_user_watermark", "successful") + + + st.banner('####verification_of_queue_unicast_for_user_watermark_using_counter_DB####') + st.banner('TC name:::: ft_sf_queue_unicast_using_Counter_DB ::::') + match = [{'SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, column_name="COUNTERS_QUEUE_NAME_MAP",interface_name= vars.D1T1P4,queue_value=0,table_name="COUNTERS",verify_list=match): + st.error("Failed to verify the queue_user_watermark_unicast counter DB value") + result += 1 + 
st.report_tc_fail("ft_sf_queue_unicast_using_Counter_DB", "snapshot_tc_counter_DB_verify", "queue_unicast", "failed") + else: + st.report_tc_pass("ft_sf_queue_unicast_using_Counter_DB", "snapshot_tc_counter_DB_verify", "queue_unicast", "successful") + + st.banner('#### PG_shared_for_persistent_watermark####') + st.banner('TC name :::: ft_sf_pg_shared_using_persistent_wm ::::') + match = [{'pg7': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, 'persistent_PG_shared', verify_list=match, port_alias=vars.D1T1P2): + st.error("Failed to verify the persistent_watermark_PG_shared counter value") + result += 1 + st.report_tc_fail("ft_sf_pg_shared_using_persistent_wm", "snapshot_tc_verify", + "PG_shared_for_persistent_watermark", "failed") + else: + st.report_tc_pass("ft_sf_pg_shared_using_persistent_wm", "snapshot_tc_verify", + "PG_shared_for_persistent_watermark", "successful") + + st.banner('#### queue_unicast_for_persistent_watermark ####') + st.banner('TC name :::: ft_sf_queue_unicast_using_persistent_wm ::::') + match = [{'uc0': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, 'queue_persistent_watermark_unicast', verify_list=match, + port_alias=vars.D1T1P4): + st.error("Failed to verify the queue_persistent_watermark_unicast counter value") + result += 1 + st.report_tc_fail("ft_sf_queue_unicast_using_persistent_wm", "snapshot_tc_verify", + "queue_unicast_for_persistent_watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_unicast_using_persistent_wm", "snapshot_tc_verify", + "queue_unicast_for_persistent_watermark", "successful") + + sf_tg_traffic_start_stop(sf_data.unicast, False) + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### clear_PG_shared_for_user_watermark####') + st.banner('TC name :::: ft_sf_pg_shared_clear_using_uwm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[0],table=sf_data.table[0],counter_type=sf_data.PG[0]): + st.error("Failed to execute the 
command clear {} snapshot counters".format(sf_data.group[0])) + result += 1 + match = [{'pg0': sf_data.initial_counter_value}] + if not sfapi.verify(vars.D1, 'user_watermark_PG_shared', verify_list=match, port_alias=vars.D1T1P1): + st.error("Failed to clear the snapshot counters") + result += 1 + st.report_tc_fail("ft_sf_pg_shared_clear_using_uwm", "snapshot_clear_verify", "clearing the PG shared counters for user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_pg_shared_clear_using_uwm", "snapshot_clear_verify", "clearing the PG shared counters for user watermark", "successful") + + + + st.banner('TC name :::: ft_sf_queue_unicast_clear_using_uwm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[1], + table=sf_data.table[0], counter_type=sf_data.PG[2]): + st.error("Failed to execute the command clear {} snapshot counters".format(sf_data.group[0])) + result += 1 + match = [{'uc0': sf_data.initial_counter_value}] + + + st.banner('#### clear_queue_unicast_percentage_Values_for_user_watermark ####') + if not sfapi.verify(vars.D1, 'queue_user_watermark_unicast', verify_list=match, port_alias=vars.D1T1P4, percentage=sf_data.percentage[1]): + st.error("Failed to clear percentage snapshot counters") + result += 1 + clr_result += 1 + st.banner('#### clear_queue_unicast_for_user_watermark using CLI####') + if not sfapi.verify(vars.D1, 'queue_user_watermark_unicast', verify_list=match, port_alias=vars.D1T1P4): + st.error("Failed to clear the snapshot counters") + result += 1 + clr_result += 1 + + if clr_result: + st.report_tc_fail("ft_sf_queue_unicast_clear_using_uwm", "snapshot_clear_verify", "clearing the unicast queue counters for user watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_unicast_clear_using_uwm", "snapshot_clear_verify", "clearing the unicast queue counters for user watermark", "successful") + + + + st.banner('#### clear_PG_shared_for_persistent_watermark ####') + st.banner('TC 
name :::: ft_sf_pg_shared_clear_using_persistent_wm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[0],table=sf_data.table[1],counter_type=sf_data.PG[0]): + st.error("Failed to execute the command clear {} snapshot counters".format(sf_data.group[0])) + result += 1 + match = [{'pg0': sf_data.initial_counter_value}] + if not sfapi.verify(vars.D1, 'persistent_PG_shared', verify_list=match, port_alias=vars.D1T1P2): + st.error("Failed to clear the snapshot counters") + result += 1 + st.report_tc_fail("ft_sf_pg_shared_clear_using_persistent_wm", "snapshot_clear_verify", "clearing the PG shared counters for persistent watermark", "failed") + else: + st.report_tc_pass("ft_sf_pg_shared_clear_using_persistent_wm", "snapshot_clear_verify", "clearing the PG shared counters for persistent watermark", "successful") + + st.banner('#### clear_queue_unicast_for_persistent_watermark ####') + st.banner('TC name :::: ft_sf_queue_unicast_clear_using_persistent_wm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[1], + table=sf_data.table[1], counter_type=sf_data.PG[2]): + st.error("Failed to execute the command clear {} snapshot counters".format(sf_data.group[0])) + result += 1 + match = [{'uc0': sf_data.initial_counter_value}] + if not sfapi.verify(vars.D1, 'queue_persistent_watermark_unicast', verify_list=match, + port_alias=vars.D1T1P4): + st.error("Failed to clear the snapshot counters") + result += 1 + st.report_tc_fail("ft_sf_queue_unicast_clear_using_persistent_wm", "snapshot_clear_verify", + "clearing the unicast queue counters for persistent watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_unicast_clear_using_persistent_wm", "snapshot_clear_verify", + "clearing the unicast queue counters for persistent watermark", "successful") + clear_qos_map_config() + if not result: + st.report_pass("snapshot_all_buffer_counters", "unicast", "successful") + 
else: + sf_collecting_debug_logs_when_test_fails() + st.report_fail("snapshot_all_buffer_counters", "unicast", "failed") + + +@pytest.mark.snapshot_regression +def test_ft_sf_all_buffer_stats_using_multicast_traffic(): + """ + Author : prudviraj k (prudviraj.kristipati@broadcom.com) and phani kumar ravula(phanikumar.ravula@broadcom.com) + """ + result = 0 + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + st.error("Failed to configure snapshot interval") + result += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result += 1 + + if sfapi.multicast_queue_start_value(vars.D1, 'queue_user_watermark_multicast', port_alias=vars.D1T1P4): + match = [{'mc8': sf_data.initial_counter_value}] + else: + match = [{'mc10': sf_data.initial_counter_value}] + + sf_tg_traffic_start_stop(sf_data.multicast, True) + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### queue_multicast_for_user_watermark ####') + st.banner('TC name:::: ft_sf_queue_multicast_using_uwm ::::') + + if sfapi.verify(vars.D1, 'queue_user_watermark_multicast', verify_list=match, port_alias=vars.D1T1P4): + st.error("Failed to verify the queue_user_watermark_multicast counter value") + result += 1 + st.report_tc_fail("ft_sf_queue_multicast_using_uwm", "snapshot_tc_verify", "queue_multicast_for_user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_multicast_using_uwm", "snapshot_tc_verify", "queue_multicast_for_user_watermark", "successful") + + + st.banner('#### queue_multicast_for_persistent_watermark ####') + st.banner('TC name :::: ft_sf_queue_multicast_using_persistent_wm ::::') + + if sfapi.verify(vars.D1, 'queue_persistent_watermark_multicast', verify_list=match, + port_alias=vars.D1T1P4): + st.error("Failed to verify the queue_persistent_watermark_unicast counter value") + result += 
1 + st.report_tc_fail("ft_sf_queue_multicast_using_persistent_wm", "snapshot_tc_verify", "queue_multicast_for_persistent_watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_multicast_using_persistent_wm", "snapshot_tc_verify", "queue_multicast_for_persistent_watermark", "successful") + + sf_tg_traffic_start_stop(sf_data.multicast, False) + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### clear_queue_multicast_for_user_watermark ####') + st.banner('TC name :::: ft_sf_queue_multicast_clear_using_uwm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[1], + table=sf_data.table[0], counter_type=sf_data.PG[3]): + st.error("Failed to execute the command clear {} snapshot counters".format(sf_data.PG[3])) + result += 1 + + if not sfapi.verify(vars.D1, 'queue_user_watermark_multicast', verify_list=match, port_alias=vars.D1T1P4): + st.error("Failed to clear the snapshot counters") + result += 1 + st.report_tc_fail("ft_sf_queue_multicast_clear_using_uwm", "snapshot_clear_verify", "clearing the multicast queue counters for user watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_multicast_clear_using_uwm", "snapshot_clear_verify", "clearing the multicast queue counters for user watermark", "successful") + + + st.banner('#### clear_queue_multicast_for_persistent_watermark ####') + st.banner('TC name :::: ft_sf_queue_multicast_clear_using_persistent_wm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_snapshot_counters", group=sf_data.group[1], + table=sf_data.table[1], counter_type=sf_data.PG[3]): + st.error("Failed to execute the command clear {} snapshot counters".format(sf_data.PG[3])) + result += 1 + + if not sfapi.verify(vars.D1, 'queue_persistent_watermark_multicast', verify_list=match, + port_alias=vars.D1T1P4): + st.error("Failed to clear the snapshot counters") + result += 1 + st.report_tc_fail("ft_sf_queue_multicast_clear_using_persistent_wm", 
"snapshot_clear_verify", + "clearing the multicast queue counters for persistent watermark", "failed") + else: + st.report_tc_pass("ft_sf_queue_multicast_clear_using_persistent_wm", "snapshot_clear_verify", + "clearing the multicast queue counters for persistent watermark", "successful") + + if not result: + st.report_pass("snapshot_all_buffer_counters", "multicast", "successful") + else: + sf_collecting_debug_logs_when_test_fails() + st.report_fail("snapshot_all_buffer_counters", "multicast", "failed") + + +@pytest.mark.snapshot_regression +def test_ft_sf_periodic_verify_using_counter_DB(): + """ + Author : prudviraj k (prudviraj.kristipati@broadcom.com) and phani kumar ravula(phanikumar.ravula@broadcom.com) + """ + result = 0 + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + st.error("Failed to configure snapshot interval") + result += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result += 1 + #sf_tg_traffic_start_stop(sf_data.periodic, True) + st.wait(2 * sf_data.snapshot_interval) + + st.banner('####verification_of_queue_unicast_periodic_update_using_counter_DB####') + st.banner('TC name:::: ft_sf_verify_time_stamp_using_counter_DB::::') + + + counters=sfapi.show(vars.D1,column_name="COUNTERS_QUEUE_NAME_MAP", interface_name=vars.D1T1P4, queue_value=0, table_name="COUNTERS") + + Timestamp_40 = counters[0]['SAI_QUEUE_STAT_TIMESTAMP'] + Time_40 = Timestamp_40.split('.') + + st.log("Collecting Time stamp when traffic rate is 40..... 
:{}".format(Time_40[1])) + st.wait(sf_data.snapshot_interval) + + counters = sfapi.show(vars.D1, column_name="COUNTERS_QUEUE_NAME_MAP", interface_name=vars.D1T1P4, queue_value=0,table_name="COUNTERS") + + Timestamp_100 = counters[0]['SAI_QUEUE_STAT_TIMESTAMP'] + Time_100 = Timestamp_100.split('.') + st.log("Collecting Time stamp when traffic rate is 100 ..... :{}".format(Time_100[1])) + + Timestamp_diff= datetime.strptime(Time_100[1], sf_data.FMT) - datetime.strptime(Time_40[1], sf_data.FMT) + st.log("Time stamp difference in Seconds is :{}".format(Timestamp_diff.seconds)) + if (Timestamp_diff.seconds) < (sf_data.snapshot_interval): + st.error("Time stamp interval is not increementing correctly") + result += 1 + + if not result: + st.report_pass("snapshot_tc_verify", "Time stamp interval", "successful") + else: + sf_collecting_debug_logs_when_test_fails() + st.report_fail("snapshot_tc_verify", "Time stamp interval", "failed") + + + """ + ############################## Below TCs are permanent readme so not testing now################################### + st.banner('TC name:::: ft_sf_verify_periodic_update_using_counter_DB,ft_sf_verify_percentage_value_using_counter_DB ::::') + counters_40=counters[0]['SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES'] + st.log("unicast queue counter with traffic rate 40 is :{}".format(counters_40)) + percentage_40=counters[0]['SAI_QUEUE_PERCENT_STAT_SHARED_WATERMARK'] + + + + sf_tg_traffic_start_stop(sf_data.periodic, False) + + st.wait(sf_data.snapshot_interval) + st.log("Sending traffic with rate 100") + + sf_tg_traffic_start_stop(sf_data.unicast, True) + st.wait(2 * sf_data.snapshot_interval) + + + + counters_100=counters[0]['SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES'] + st.log("unicast queue counter with traffic rate 100 is :{}".format(counters_100)) + + percentage_100=counters[0]['SAI_QUEUE_PERCENT_STAT_SHARED_WATERMARK'] + st.log("unicast queue percentage value with traffic rate 100 is :{}".format(percentage_100)) + + + 
sf_tg_traffic_start_stop(sf_data.unicast, False) + st.wait(2 * sf_data.snapshot_interval) + + + if counters_40 >= counters_100 or counters_40 == 0 or counters_100 == 0: + st.error("counters values are not increemented after increasing the transmit rate") + result += 1 + + st.report_tc_fail("ft_sf_verify_periodic_update_using_counter_DB", "snapshot_tc_verify", "periodic update","failed") + else: + st.report_tc_pass("ft_sf_verify_periodic_update_using_counter_DB", "snapshot_tc_verify", "periodic update","successful") + + if percentage_100 <= percentage_40: + st.error("percentage value is not increemented after increasing the transmit rate") + result += 1 + + st.report_tc_fail("ft_sf_verify_periodic_update_using_counter_DB", "snapshot_tc_verify", "periodic percentage update", + "failed") + else: + st.report_tc_pass("ft_sf_verify_periodic_update_using_counter_DB", "snapshot_tc_verify", "periodic percentage update", + "successful") + + + st.banner('#### clearing_Counter_DB_counters ########') + st.banner('TC name :::: ft_sf_verify_periodic_clear_using_counter_DB ::::') + + counters=sfapi.show(vars.D1,column_name="COUNTERS_QUEUE_NAME_MAP", interface_name=vars.D1T1P4, queue_value=0, table_name="COUNTERS") + + counters = counters[0]['SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES'] + st.log("unicast queue counter after stopping the traffic is :{}".format(counters)) + + match = [{'SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES': sf_data.initial_counter_value}] + if not sfapi.verify(vars.D1, column_name="COUNTERS_QUEUE_NAME_MAP", interface_name=vars.D1T1P4, queue_value=0, table_name="COUNTERS", verify_list=match): + st.error("counters values are not increemented after increasing the transmit rate") + result += 1 + + st.report_tc_fail("ft_sf_verify_periodic_clear_using_counter_DB", "snapshot_tc_verify", "periodic update and clear", "failed") + else: + st.report_tc_pass("ft_sf_verify_periodic_clear_using_counter_DB", "snapshot_tc_verify", "periodic update and clear","successful") + """ + 
+@pytest.mark.snapshot_regression +def test_ft_sf_verify_buffer_pool_counters(): + """ + Author : prudviraj k (prudviraj.kristipati@broadcom.com) and phani kumar ravula(phanikumar.ravula@broadcom.com) + """ + result = 0 + per_result = 0 + sf_data.platform_name_summary = get_platform_summary(vars.D1) + sf_data.platform_name = sf_data.platform_name_summary["platform"] + sf_data.platform_hwsku = sf_data.platform_name_summary["hwsku"] + + path= "/usr/share/sonic/device/{}/{}/{}".format(sf_data.platform_name,sf_data.platform_hwsku,sf_data.device_j2_file) + convert_json = "sonic-cfggen -d -t " "{} > {}".format(path,sf_data.config_file) + sfapi.load_json_config(vars.D1, convert_json, sf_data.config_file) + reboot_api.config_save_reload(vars.D1) + st.log("To make sure after reload DUT is fully operational") + st.wait(sf_data.reload_interval) + + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + st.error("Failed to configure snapshot interval") + result += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result += 1 + + sf_tg_traffic_start_stop(sf_data.unicast, True) + st.log("waiting for two snapshot interval times to get the counter values reflect correctly") + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### buffer_Pool_for_user_watermark####') + st.banner('TC name :::: ft_sf_buffer_pool_using_uwm ::::') + match = {'pool': 'ingress_lossless_pool'} + value = {'bytes': sf_data.initial_counter_value } + if sfapi.verify_buffer_pool(vars.D1, 'buffer_pool_watermark', verify_list=match, key=value): + st.error("Failed to verify the buffer pool counters for user watermark") + result += 1 + st.report_tc_fail("ft_sf_buffer_pool_using_uwm", "snapshot_tc_verify", "buffer_pool_for_user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_buffer_pool_using_uwm", 
"snapshot_tc_verify", "buffer_pool_for_user_watermark", "successful") + + + st.banner('TC name :::: ft_sf_buffer_pool_using_persistent_wm ::::') + st.banner('#### buffer_pool_for_persistent_watermark using percentage####') + match = {'pool': 'egress_lossless_pool'} + value = {'percent': sf_data.initial_counter_value } + if sfapi.verify_buffer_pool(vars.D1, 'buffer_pool_persistent-watermark', verify_list=match, key=value, percent=sf_data.percentage[0]): + st.error("Failed to verify the buffer pool counters for persistent watermark") + result += 1 + per_result += 1 + st.banner('#### buffer_pool_for_persistent_watermark using CLI####') + match = {'pool': 'egress_lossless_pool'} + value = {'bytes': sf_data.initial_counter_value } + if sfapi.verify_buffer_pool(vars.D1, 'buffer_pool_persistent-watermark', verify_list=match, key=value): + st.error("Failed to verify the buffer pool counters for persistent watermark") + result += 1 + per_result += 1 + if per_result: + st.report_tc_fail("ft_sf_buffer_pool_using_persistent_wm", "snapshot_tc_verify", + "buffer_pool_for_persistent_watermark", "failed") + else: + st.report_tc_pass("ft_sf_buffer_pool_using_persistent_wm", "snapshot_tc_verify", + "buffer_pool_for_persistent_watermark", "successful") + + st.banner('#### buffer_pool_using_counter_DB ####') + st.banner('TC name :::: ft_sf_buffer_pool_using_counter_DB ::::') + + match = [{'SAI_BUFFER_POOL_STAT_WATERMARK_BYTES': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, 'buffer_pool_counters_DB', oid_type='ingress_lossless_pool', verify_list=match): + st.error("Failed to verify the ingress lossless buffer pool counter using counter DB value") + result += 1 + st.report_tc_fail("ft_sf_buffer_pool_using_counter_DB", "snapshot_tc_verify", "ingress lossless buffer pool", + "failed") + else: + st.report_tc_pass("ft_sf_buffer_pool_using_counter_DB", "snapshot_tc_verify", "ingress lossless buffer pool", + "successful") + + sf_tg_traffic_start_stop(sf_data.unicast, False) + 
st.log("waiting for two snapshot interval times to get the counter values reflect correctly") + st.wait(2 * sf_data.snapshot_interval) + + st.banner('#### clear_buffer_Pool_for_user_watermark####') + st.banner('TC name :::: ft_sf_buffer_pool_clear_using_uwm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_buffer-pool watermark"): + st.error("Failed to clear buffer-pool watermark") + + st.log("After clear buffer_pool checking the stats with 10 cells tolerance") + counters= sfapi.get(vars.D1, 'buffer_pool_watermark', get_value='bytes', match={'pool':'ingress_lossless_pool'}) + if counters > sf_data.buffer_pool_tolerance: + st.error("Failed to clear the buffer pool counters for user watermark") + result += 1 + st.report_tc_fail("ft_sf_buffer_pool_clear_using_uwm", "snapshot_tc_verify", "buffer_pool_clear_for_user_watermark", "failed") + else: + st.report_tc_pass("ft_sf_buffer_pool_clear_using_uwm", "snapshot_tc_verify", "buffer_pool_clear_for_user_watermark", "successful") + + st.banner('#### clear_buffer_pool_for_persistent_watermark ####') + st.banner('TC name :::: ft_sf_buffer_pool_clear_using_persistent_wm ::::') + if not sfapi.config_snapshot_interval(vars.D1, snap="clear_buffer-pool persistent-watermark"): + st.error("Failed to clear_buffer-pool persistent-watermark") + + st.log("After clear buffer_pool checking the stats with 10 cells tolerance") + counters = sfapi.get(vars.D1, 'buffer_pool_watermark',get_value='bytes', match={'pool':'egress_lossless_pool'}) + if counters > sf_data.buffer_pool_tolerance: + st.error("Failed to clear the buffer pool counters for persistent watermark") + result += 1 + st.report_tc_fail("ft_sf_buffer_pool_clear_using_persistent_wm", "snapshot_tc_verify", + "buffer_pool_clear_for_persistent_watermark", "failed") + else: + st.report_tc_pass("ft_sf_buffer_pool_clear_using_persistent_wm", "snapshot_tc_verify", + "buffer_pool_clear_for_persistent_watermark", "successful") + if not result: + 
st.report_pass("snapshot_tc_verify", "buffer pool", "successful") + else: + sf_collecting_debug_logs_when_test_fails() + st.report_fail("snapshot_tc_verify", "buffer pool", "failed") + +@pytest.mark.snapshot_regression +def test_ft_sf_verify_cpu_counters(): + """ + Author : prudviraj k (prudviraj.kristipati@broadcom.com) and phani kumar ravula(phanikumar.ravula@broadcom.com) + """ + result = 0 + if not sfapi.config_snapshot_interval(vars.D1, snap="interval", interval_val=sf_data.snapshot_interval): + st.error("Failed to configure snapshot interval") + result += 1 + match = [{'snapshotinterval': sf_data.snapshot_interval}] + if not sfapi.verify(vars.D1,'snapshot_interval', verify_list=match): + st.error("Failed to verify the configured snapshot interval") + result += 1 + + st.log("sflow configuration") + sflow.enable_disable_config(vars.D1, interface=False, interface_name=None, action="enable") + sflow.config_attributes(vars.D1, sample_rate=sf_data.sflow_sample_rate, interface_name=vars.D1T1P1) + + sf_tg_traffic_start_stop(sf_data.unicast, True) + st.log("waiting for two snapshot interval times to get the counter values reflect correctly") + st.wait(2 * sf_data.snapshot_interval) + + st.banner('TC name:::: ft_sf_verify_cpu_counter_value_using_counter_DB ::::') + + st.banner('#### cpu_counters using CLI ####') + match = [{'CPU:3': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, 'queue_user_watermark_cpu', verify_list=match, port_alias="CPU"): + st.error("Failed to verify the cpu counter value using CLI") + result += 1 + + st.banner('#### cpu_counters using counter DB ####') + match = [{'SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES': sf_data.initial_counter_value}] + if sfapi.verify(vars.D1, column_name="COUNTERS_QUEUE_NAME_MAP",interface_name= 'CPU',queue_value=3,table_name="COUNTERS",verify_list=match): + st.error("Failed to verify the cpu counter value using counter DB") + result += 1 + sf_collecting_debug_logs_when_test_fails() + if result: + 
st.report_fail("snapshot_tc_verify", "cpu", "failed") + else: + st.report_pass("snapshot_tc_verify", "cpu", "successful") diff --git a/spytest/tests/system/test_snmp.py b/spytest/tests/system/test_snmp.py index 73333827bb5..3a926b79b28 100644 --- a/spytest/tests/system/test_snmp.py +++ b/spytest/tests/system/test_snmp.py @@ -100,7 +100,6 @@ def initialize_variables(): data.oid_dot1q_Vlan_Static_Untagged_Ports = '1.3.6.1.2.1.17.7.1.4.3.1.4' data.oid_dot1q_Vlan_Static_Row_Status = '1.3.6.1.2.1.17.7.1.4.3.1.5' data.oid_dot1q_Pvid = '1.3.6.1.2.1.17.7.1.4.5.1.1' - data.oid_memory = "UCD-SNMP-MIB::memory" data.source_mac = "00:0a:01:00:00:01" data.source_mac1 = "00:0a:02:00:00:01" data.vlan = str(random_vlan_list()[0]) @@ -111,7 +110,31 @@ def initialize_variables(): data.nsNotifyShutdown='8072.4.0.2' data.filter = '-Oqv' data.brcmSonicConfigChange = '2.1.2.0.1' - + data.oid_ifx_table = '1.3.6.1.2.1.31.1.1' + data.oid_ip_System_Stats_Table = '1.3.6.1.2.1.4.31.1' + data.oid_ip_IfStats_Table = '1.3.6.1.2.1.4.31.3' + data.oid_ip_Address_Table = '1.3.6.1.2.1.4.34' + data.oid_ip_NetToPhysical_Table = '1.3.6.1.2.1.4.35' + data.oid_icmp_Msgs = '1.3.6.1.2.1.5' + data.oid_tcp_mib = '1.3.6.1.2.1.6' + data.oid_udp_mib = '1.3.6.1.2.1.7' + data.oid_snmpv2_mib = '1.3.6.1.2.1.11' + data.oid_host_resource_mib = '1.3.6.1.2.1.25' + data.oid_framework_mib = '1.3.6.1.6.3.10' + data.oid_mpd_mib = '1.3.6.1.6.3.11' + data.oid_target_mib = '1.3.6.1.6.3.12' + data.oid_notification_mib = '1.3.6.1.6.3.13' + data.oid_user_based_sm_mib = '1.3.6.1.6.3.15' + data.oid_view_based_acm_mib = '1.3.6.1.6.3.16' + data.oid_ent_physical_table = '1.3.6.1.2.1.47.1.1.1' + data.oid_ent_phy_sensor_table = '1.3.6.1.2.1.99.1.1' + data.oid_dot3_stats_table = '1.3.6.1.2.1.10.7.2' + data.oid_net_snmp_agent_mib = '1.3.6.1.4.1.8072.1' + data.oid_net_snmp_vacm_mib = '1.3.6.1.4.1.8072.1.9' + data.oid_ucd_diskio_mib = '1.3.6.1.4.1.2021.13.15' + data.oid_ucd_memory = '1.3.6.1.4.1.2021.4' + data.oid_ucd_la_table = 
'1.3.6.1.4.1.2021.10' + data.oid_ucd_system_stats = '1.3.6.1.4.1.2021.11' def snmp_pre_config(): """ @@ -357,14 +380,6 @@ def test_ft_snmp_sysContact(): st.report_fail("sysContact_verification_fail") st.report_pass("test_case_passed") -def test_ft_snmp_mib_memory(): - output=basic_obj.get_memory_info(vars.D1) - get_snmp_output = snmp_obj.get_snmp_memory(ipaddress, data.oid_memory,data.ro_community) - if output['total']==int(get_snmp_output['TotalReal']) and output['used']==int(get_snmp_output['AvailReal']): - st.report_pass("test_case_passed") - st.report_fail("test_case_failed, output= {}, dnmp_output={}".format(output,get_snmp_output)) - - @pytest.mark.regression @pytest.mark.snmp_mib_2 @pytest.mark.community @@ -565,6 +580,356 @@ def test_ft_snmp_ipcidr_route_table(): st.report_fail("get_snmp_output_fail") st.report_pass("test_case_passed") +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ifx_table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for ifXTable Object gives list of interface entries on the switch. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ifx_table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ip_System_Stats_Table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for ipSystemStatsTable Object gives the table containing system wide, IP version specific traffic statistics. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ip_System_Stats_Table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ip_IfStats_Table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for ipIfStatsTable Object gives the table containing per-interface traffic statistics. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ip_IfStats_Table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ip_Address_Table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for ipAddressTable Object gives table contains addressing information relevant to the entity's interfaces. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ip_Address_Table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ip_NetToPhysical_Table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for ipNetToPhysicalTable Object gives each entry contains one IP address to physical address equivalence. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ip_NetToPhysical_Table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_icmp_Msgs(): + """ + Author : Prasad Darnasi + Verify snmpwalk for icmpMsgs Object gives Internet Control Message Protocol related information. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_icmp_Msgs, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_tcp_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for TCP-MIB Object gives Transmission Control Protocol related information. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_tcp_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_udp_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for UDP-MIB Object gives User Datagram Protocol related information. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_udp_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_snmpv2_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMPv2-MIB Object gives system SNMP variables. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_snmpv2_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_host_resource_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for HOST-RESOURCES-MIB Object gives a uniform set of objects useful for the management of host computers. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_host_resource_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_framework_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-FRAMEWORK-MIB Object gives The SNMP Management Architecture information. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_framework_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_mpd_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-MPD-MIB Object gives the compliance statement for SNMP entities which implement the SNMP-MPD-MIB. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_mpd_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_target_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-TARGET-MIB Object it defines MIB objects which provide mechanisms to remotely configure the parameters. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_target_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_notification_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-NOTIFICATION-MIB Object defines MIB objects which provide mechanisms to remotely configure the parameters. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_notification_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_user_based_sm_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-USER-BASED-SM-MIB Object gives the management information definitions for the SNMP User-based Security Model. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_user_based_sm_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_view_based_acm_mib(): + """ + Author : Prasad Darnasi + Verify snmpwalk for SNMP-VIEW-BASED-ACM-MIB Object gives the management information definitions for the View-based Access Control Model for SNMP. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_view_based_acm_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ent_physical_table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for entPhysicalTableObject gives the Information about a particular physical entity. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ent_physical_table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ent_phy_sensor_table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for entPhySensorTable gives The type of data returned by the associated entPhySensorValue object. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ent_phy_sensor_table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_dot3_stats_table(): + """ + Author : Prasad Darnasi + Verify snmpwalk for dot3StatsTable gives Statistics for a collection of ethernet-like interfaces attached to a particular system. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_dot3_stats_table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_net_snmp_agent_mib(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for NET-SNMP-AGENT-MIB is successfull. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_net_snmp_agent_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_net_snmp_vacm_mib(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for NET-SNMP-VACM-MIB defines Net-SNMP extensions to the standard VACM view table. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_net_snmp_vacm_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ucd_diskio_mib(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for UCD-DISKIO-MIB defines objects for disk IO statistics. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ucd_diskio_mib, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ucd_memory(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for UCD-MEMORY-MIB it gives Memory Statistics. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ucd_memory, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ucd_la_table(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for UCD-LaTable-MIB it gives the 1,5 and 15 minute load averages. + Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ucd_la_table, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_new +def test_ft_snmp_ucd_system_stats(): + """ + Author : Prasad Darnasi + *Verify snmpwalk for UCD-SystemStats-MIB it gives the CPU related information. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + get_snmp_output = snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_ucd_system_stats, + community_name=data.ro_community) + if not get_snmp_output: + st.report_fail("get_snmp_output_fail") + st.report_pass("test_case_passed") + @pytest.mark.snmp_dot1d_bridge @pytest.mark.regression def test_ft_snmp_dot1d_base_bridge_address(): @@ -1165,3 +1530,65 @@ def test_ft_snmp_docker_restart(): st.report_fail("sysName_verification_fail_after_docker_restart") st.report_pass("test_case_passed") +@pytest.mark.regression +@pytest.mark.snmp_counters +def test_ft_snmp_basic_counters(): + """ + Author : Prasad Darnasi + *Verify snmp basic counters. + Reference Test Bed : D1 --- Mgmt Network + """ + snmp_obj.clear_snmp_counters(vars.D1) + # simulating snmp counters + snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_sysName, + community_name=data.ro_community, filter=data.filter) + + output = snmp_obj.verify_snmp_counters(vars.D1, map={"snmp_packets_input": 2, "requested_variables": 2, + "get_request_pdus": 1, "get_next_pdus": 1, + "snmp_packets_output": 2, "response_pdus": 2}) + + if not output: + st.report_fail("snmp_counter_not_incremented") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_counters +def test_ft_snmp_trap_counter(): + """ + Author : Prasad Darnasi + *Verify snmp trap pdus. + Reference Test Bed : D1 --- Mgmt Network + """ + snmp_obj.clear_snmp_counters(vars.D1) + # simulating trap pdu + intf_obj.interface_shutdown(vars.D1, vars.D1T1P1) + intf_obj.interface_noshutdown(vars.D1, vars.D1T1P1) + + result = snmp_obj.verify_snmp_counters(vars.D1, map={"trap_pdus":3}) + + if not result: + st.report_fail("snmp_counter_not_incremented") + st.report_pass("test_case_passed") + +@pytest.mark.regression +@pytest.mark.snmp_counters +def test_ft_snmp_counter_negative_tests(): + """ + Author : Prasad Darnasi + *Verify snmp trap pdus. 
+ Reference Test Bed : D1 --- Mgmt Network + """ + snmp_obj.clear_snmp_counters(vars.D1) + # simulating snmp counters + + snmp_obj.walk_snmp_operation(ipaddress=ipaddress, oid=data.oid_sysName, + community_name="test_test", filter=data.filter, report=False) + snmp_obj.walk_snmp_operation(ipaddress=ipaddress, version='1', oid=data.oid_sysName, + community_name=data.ro_community, filter=data.filter, report=False) + + output = snmp_obj.verify_snmp_counters(vars.D1, map={"unknown_community_name": 6, "snmp_version_errors": 6}) + + if not output: + st.report_fail("snmp_counter_not_incremented") + st.report_pass("test_case_passed") + diff --git a/spytest/tests/system/test_ssh.py b/spytest/tests/system/test_ssh.py index 88736b7a47d..1e9c3a578cf 100644 --- a/spytest/tests/system/test_ssh.py +++ b/spytest/tests/system/test_ssh.py @@ -218,6 +218,7 @@ def test_ft_ssh_add_user_verify(): acl_config = acl_data.acl_json_config_control_plane st.log("ACL_DATA: {}".format(acl_config)) acl_obj.apply_acl_config(vars.D1, acl_config) + st.wait(3, "Waiting to apply acl rules") acl_obj.show_acl_table(vars.D1) acl_obj.show_acl_rule(vars.D1) @@ -296,11 +297,16 @@ def test_ft_ssh_add_user_verify(): st.log('Verifying SNMP ACL with invalid source address') change_acl_rules(acl_data.acl_json_config_control_plane, "SNMP_SSH|RULE_1", "SRC_IP", "2.2.2.0/24") + change_acl_rules(acl_data.acl_json_config_control_plane, "SNMP_SSH|RULE_2", "SRC_IP", "2.2.2.0/24") acl_config = acl_data.acl_json_config_control_plane acl_obj.acl_delete(vars.D1) acl_obj.apply_acl_config(vars.D1, acl_config) - st.wait(3, "Waiting to apply acl rules") - snmp_out = execute_command(ssh_conn_obj, snmp_cmd) + for _ in range(1,15): + snmp_out = execute_command(ssh_conn_obj, snmp_cmd) + if "Timeout" in snmp_out: + break + else: + st.wait(1) if "Timeout" not in snmp_out: acl_snmp = + 1 st.log("connecting to device with default username={},password={}".format(ssh_data.usr_default, ssh_data.pwd_final)) @@ -368,6 +374,7 @@ def 
test_ft_control_plane_acl_icmp(): acl_config = acl_data.acl_json_config_control_plane_v2 st.log("ACL_DATA: {}".format(acl_config)) acl_obj.apply_acl_config(vars.D1, acl_config) + st.wait(3, "Waiting to apply acl rules") acl_obj.show_acl_table(vars.D1) acl_obj.show_acl_rule(vars.D1) if ip_obj.ping(dut=vars.D2,addresses=d1_ipaddress): @@ -382,6 +389,7 @@ def test_ft_control_plane_acl_icmp(): change_acl_rules(acl_config, "L3_IPV6_ICMP|rule1", "SRC_IPV6", "{}/128".format(d1_ipv6address)) acl_obj.acl_delete(vars.D1) acl_obj.apply_acl_config(vars.D1, acl_config) + st.wait(3, "Waiting to apply acl rules") if not ip_obj.ping(dut=vars.D2,addresses=d1_ipaddress): st.error("ICMP ipv4 packets are dropped with applied control plane acl rules.") result = False diff --git a/spytest/tests/system/threshold/test_threshold.py b/spytest/tests/system/threshold/test_threshold.py new file mode 100644 index 00000000000..456daaaab55 --- /dev/null +++ b/spytest/tests/system/threshold/test_threshold.py @@ -0,0 +1,384 @@ +# Threshold Feature FT test cases. 
+# Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) + +import pytest + +from spytest import st, tgapi, SpyTestDict +from spytest.utils import random_vlan_list + +import apis.system.threshold as tfapi +import apis.switching.vlan as vapi +import apis.system.basic as bcapi +import apis.system.box_services as bsapi +import apis.system.switch_configuration as scapi + +import utilities.utils as cutils + +@pytest.fixture(scope="module", autouse=True) +def threshold_feature_module_hooks(request): + global_vars_and_constants_init() + tf_module_config(config='yes') + yield + tf_module_config(config='no') + + +@pytest.fixture(scope="function", autouse=True) +def threshold_feature_func_hooks(request): + verify_system_map_status(tf_data.max_time_to_check_sys_maps[0], tf_data.max_time_to_check_sys_maps[1]) + yield + + +def global_vars_and_constants_init(): + global vars + global tf_data + vars = st.ensure_min_topology('D1T1:4') + tf_data = SpyTestDict() + hw_constants = st.get_datastore(vars.D1, "constants") + scapi.get_running_config(vars.D1) + # Global Vars + tf_data.tg_port_list = [vars.T1D1P1, vars.T1D1P2, vars.T1D1P3, vars.T1D1P4] + tf_data.port_list = [vars.D1T1P1, vars.D1T1P2, vars.D1T1P3, vars.D1T1P4] + tf_data.platform = bcapi.get_hwsku(vars.D1).lower() + tf_data.unicast = 'unicast' + tf_data.multicast = 'multicast' + tf_data.tg_current_mode = tf_data.unicast + tf_data.queues_to_check = ['COUNTERS_PG_NAME_MAP', 'COUNTERS_QUEUE_NAME_MAP'] + tf_data.max_time_to_check_sys_maps = [150, 2] # Seconds + tf_data.traffic_duration = 3 # Seconds + tf_data.test_max_retries_count = 3 + tf_data.need_debug_prints = True + # Common Constants + tf_data.pg_headroom_un_supported_platforms = hw_constants['THRESHOLD_FEATURE_PG_HEADROOM_UN_SUPPORTED_PLATFORMS'] + tf_data.th3_platforms = hw_constants['TH3_PLATFORMS'] + return tf_data + + +def tf_module_config(config='yes'): + if config == 'yes': + tf_data.vlan = str(random_vlan_list()[0]) + vapi.create_vlan(vars.D1, tf_data.vlan) + 
vapi.add_vlan_member(vars.D1, tf_data.vlan, port_list=tf_data.port_list, tagging_mode=True) + tf_data.tg, tf_data.tg_ph_list, tf_data.stream_tf_data = tf_tg_stream_config() + tfapi.threshold_feature_debug(vars.D1, mode='port_map') + else: + vapi.delete_vlan_member(vars.D1, tf_data.vlan, port_list=tf_data.port_list, tagging_mode=True) + vapi.delete_vlan(vars.D1, tf_data.vlan) + + +def tf_tg_stream_config(): + st.log('TG configuration for tf tests') + tg_handler = tgapi.get_handles(vars, tf_data.tg_port_list) + tg = tg_handler["tg"] + tg_ph_1 = tg_handler["tg_ph_1"] + tg_ph_2 = tg_handler["tg_ph_2"] + tg_ph_3 = tg_handler["tg_ph_3"] + tg_ph_4 = tg_handler["tg_ph_4"] + tg_ph_list = [tg_ph_1, tg_ph_2, tg_ph_3, tg_ph_4] + + stream_tf_data = {} + + tgapi.traffic_action_control(tg_handler, actions=["reset", "clear_stats"]) + + stream_tf_data['1'] = tg.tg_traffic_config(port_handle=tg_ph_1, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:02", + mac_dst="00:00:00:00:00:01", vlan_id=tf_data.vlan, + l2_encap='ethernet_ii_vlan', frame_size='1500', high_speed_result_analysis=1)['stream_id'] + stream_tf_data['2'] = tg.tg_traffic_config(port_handle=tg_ph_2, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:03", + mac_dst="00:00:00:00:00:01", vlan_id=tf_data.vlan, + l2_encap='ethernet_ii_vlan', frame_size='1500', high_speed_result_analysis=1)['stream_id'] + stream_tf_data['3'] = tg.tg_traffic_config(port_handle=tg_ph_3, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:04", + mac_dst="00:00:00:00:00:01", vlan_id=tf_data.vlan, + l2_encap='ethernet_ii_vlan', frame_size='1500', high_speed_result_analysis=1)['stream_id'] + stream_tf_data['4'] = tg.tg_traffic_config(port_handle=tg_ph_4, mode='create', rate_percent=100, + transmit_mode="continuous", mac_src="00:00:00:00:00:01", + mac_dst="00:00:00:00:00:02", vlan_id=tf_data.vlan, + l2_encap='ethernet_ii_vlan', 
frame_size='1500', high_speed_result_analysis=1)['stream_id'] + + return tg, tg_ph_list, stream_tf_data + + +def verify_system_map_status(itter_count, delay): + bsapi.get_system_uptime_in_seconds(vars.D1) + if not tfapi.verify_hardware_map_status(vars.D1, tf_data.queues_to_check, itter_count=itter_count, delay=delay): + st.error('Required Threshold Feature Queues are not initialized in the DUT') + report_result(0) + + +def tf_tg_traffic_start_stop(traffic_mode, duration=3, traffic_action=True): + st.log(">>> Configuring '{}' traffic streams".format(traffic_mode)) + st.debug("TG Streams : Current Mode = {}, Requested Mode = {}".format(tf_data.tg_current_mode, traffic_mode)) + if not tf_data.tg_current_mode == traffic_mode: + tf_data.tg_current_mode = traffic_mode + if traffic_mode == tf_data.multicast: + for each in tf_data.stream_tf_data: + tf_data.tg.tg_traffic_config(mode='modify', stream_id=tf_data.stream_tf_data[each], + mac_dst="01:82:33:33:33:33",high_speed_result_analysis=1) + else: + tf_data.tg.tg_traffic_config(mode='modify', stream_id=tf_data.stream_tf_data['1'], + mac_dst="00:00:00:00:00:01",high_speed_result_analysis=1) + tf_data.tg.tg_traffic_config(mode='modify', stream_id=tf_data.stream_tf_data['2'], + mac_dst="00:00:00:00:00:01",high_speed_result_analysis=1) + tf_data.tg.tg_traffic_config(mode='modify', stream_id=tf_data.stream_tf_data['3'], + mac_dst="00:00:00:00:00:01",high_speed_result_analysis=1) + tf_data.tg.tg_traffic_config(mode='modify', stream_id=tf_data.stream_tf_data['4'], + mac_dst="00:00:00:00:00:02",high_speed_result_analysis=1) + tf_data.stream_list = [tf_data.stream_tf_data['1'], tf_data.stream_tf_data['2'],tf_data.stream_tf_data['3'], tf_data.stream_tf_data['4']] + if traffic_action: + tf_data.tg.tg_traffic_control(action='run', stream_handle=tf_data.stream_list) + st.wait(duration) + tf_data.tg.tg_traffic_control(action='stop', stream_handle=tf_data.stream_list) + # Allow the breach event to be handled and written to DB. 
+ st.wait(1) + + +def tf_unconfig(): + tfapi.clear_threshold(vars.D1, breach='all') + tfapi.clear_threshold(vars.D1, threshold_type='priority-group', buffer_type='all', port_alias=[vars.D1T1P1, vars.D1T1P4]) + tfapi.clear_threshold(vars.D1, threshold_type='queue', buffer_type='all', port_alias=[vars.D1T1P1, vars.D1T1P4]) + + +def tf_collecting_debug_logs_when_test_fails(test, delay, traffic_mode): + cutils.banner_log("TEST Failed - Collecting the DEBUG log and prints") + tfapi.threshold_feature_debug(vars.D1, mode=['clear_counters', 'debug_log_enable']) + tf_tg_traffic_start_stop(traffic_mode, traffic_action=False) + + tf_data.tg.tg_traffic_control(action='run', stream_handle=tf_data.stream_list) + st.wait(delay) + tfapi.threshold_feature_debug(vars.D1, mode=['show_counters', 'asic_info', 'show_logging'], + platform=tf_data.platform, test=test) + + tf_data.tg.tg_traffic_control(action='stop', stream_handle=tf_data.stream_list) + tfapi.threshold_feature_debug(vars.D1, mode='debug_log_disable') + + +def report_result(status): + if status: + st.report_pass('test_case_passed') + else: + st.report_fail('test_case_failed') + +@pytest.mark.threshold_ft +@pytest.mark.threshold_ft_cli +def test_ft_tf_pg_thre_con_shared(): + """ + Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) + """ + + tf_data.index = 7 + if tf_data.platform in tf_data.th3_platforms: + tf_data.index = 0 + st.log("Testing with PG{} for PG SHARED test on {}".format(tf_data.index, tf_data.platform)) + tf_data.threshold = 4 + + count = 1 + while True: + result = 1 + result2 = 1 + + cutils.banner_log("TEST Starts for iteration - {}".format(count)) + + config_parameter = {"threshold_type": 'priority-group', "buffer_type": 'shared', "port_alias": vars.D1T1P1, + "pg{}".format(tf_data.index): tf_data.threshold} + + st.log("PG shared threshold config") + tfapi.config_threshold(vars.D1, threshold_type='priority-group', port_alias=vars.D1T1P1, index=tf_data.index, + buffer_type='shared', value=tf_data.threshold) 
+ st.log("PG shared threshold config verify") + if not tfapi.verify_threshold(vars.D1, **config_parameter): + st.error("Unable to configure the PG index and corresponding threshold value on PG shared buffer") + result = 0 + + st.log("Traffic start and stop") + tf_tg_traffic_start_stop(tf_data.unicast, tf_data.traffic_duration) + + st.log("Checking PG shared breach event") + if not tfapi.verify_threshold_breaches(vars.D1, buffer='priority-group', port=vars.D1T1P1, index=tf_data.index, + threshold_type='shared'): + st.error("PG shared threshold breach Event is not found") + if tf_data.need_debug_prints: + tf_collecting_debug_logs_when_test_fails('shared', tf_data.traffic_duration, tf_data.unicast) + result = 0 + result2 = 0 + + st.log("Clear PG shared threshold breach") + tfapi.clear_threshold(vars.D1, breach='all') + + st.log("Checking PG shared breach event") + if tfapi.verify_threshold_breaches(vars.D1, buffer='priority-group', port=vars.D1T1P1, index=tf_data.index, + threshold_type='shared'): + st.error("Post clear - PG shared threshold breach Event is found") + result = 0 + + st.log("PG shared threshold config clear") + tfapi.clear_threshold(vars.D1, threshold_type='priority-group', port_alias=vars.D1T1P1, index=tf_data.index, + buffer_type='shared') + st.log("PG shared threshold config verify") + if tfapi.verify_threshold(vars.D1, **config_parameter): + st.error("Unable to configure the PG index and corresponding shared threshold value") + result = 0 + + if not result2 and tf_data.need_debug_prints: + st.log("As Breach events are not observed collecting logs by disabling the Thresholds") + tf_collecting_debug_logs_when_test_fails('shared', tf_data.traffic_duration, tf_data.unicast) + + tfapi.clear_threshold(vars.D1, breach='all') + + if result: + st.log("Test PASSED in Iteration {}.".format(count)) + report_result(result) + break + + if count == tf_data.test_max_retries_count: + st.log("Test Failed in all {} Iterations. 
Hence Declaring as FAIL".format(count)) + report_result(result) + + st.log("Test Failed in the Iteration-{}. Hence re-testing".format(count)) + count += 1 + + +@pytest.mark.threshold_ft +@pytest.mark.threshold_ft_cli +def test_ft_tf_queue_thre_con_unicast(): + """ + Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) + """ + + tf_data.index = 0 + tf_data.threshold = 20 + count = 1 + while True: + result = 1 + result2 = 1 + + cutils.banner_log("TEST Starts for iteration - {}".format(count)) + + st.log("Unicast queue threshold config") + tfapi.config_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4, index=tf_data.index, + buffer_type='unicast', value=tf_data.threshold) + st.log("Unicast queue threshold config verify") + if not tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='unicast', + port_alias=vars.D1T1P4, uc0=tf_data.threshold): + st.error("Unable to configure unicast queue threshold value on unicast-queue buffer") + result = 0 + + st.log("Traffic start and stop") + tf_tg_traffic_start_stop(tf_data.unicast, tf_data.traffic_duration) + + st.log("Checking unicast queue breach event") + if not tfapi.verify_threshold_breaches(vars.D1, buffer='queue', port=vars.D1T1P4, index=tf_data.index, + threshold_type='unicast'): + st.error("Unicast queue threshold breach Event is not found") + if tf_data.need_debug_prints: + tf_collecting_debug_logs_when_test_fails('unicast', tf_data.traffic_duration, tf_data.unicast) + result = 0 + result2 = 0 + + st.log("Clear Unicast queue threshold breach") + tfapi.clear_threshold(vars.D1, breach='all') + + st.log("Checking unicast queue breach event") + if tfapi.verify_threshold_breaches(vars.D1, buffer='queue', port=vars.D1T1P4, index=tf_data.index, + threshold_type='unicast'): + st.error("Post clear - Unicast queue threshold breach Event is found") + result = 0 + + st.log("Unicast queue threshold config clear") + tfapi.clear_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4, 
index=tf_data.index, + buffer_type='unicast') + st.log("Unicast queue threshold config verify") + if tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='unicast', + port_alias=vars.D1T1P4, uc0=tf_data.threshold): + st.error("Unable to configure unicast queue threshold value") + result = 0 + + if not result2 and tf_data.need_debug_prints: + st.log("As Breach events are not observed collecting logs by disabling the Thresholds") + tf_collecting_debug_logs_when_test_fails('unicast', tf_data.traffic_duration, tf_data.unicast) + + tfapi.clear_threshold(vars.D1, breach='all') + if result: + st.log("Test PASSED in Iteration {}.".format(count)) + report_result(result) + break + + if count == tf_data.test_max_retries_count: + st.log("Test Failed in all {} Iterations. Hence Declaring as FAIL".format(count)) + report_result(result) + + st.log("Test Failed in the Iteration-{}. Hence re-testing".format(count)) + count += 1 + + +@pytest.mark.threshold_ft +@pytest.mark.threshold_ft_cli +def test_ft_tf_queue_thre_con_multicast(): + """ + Author : Prudvi Mangadu (prudvi.mangadu@broadcom.com) + """ + + tf_data.index = 0 + tf_data.threshold = 2 + count = 1 + while True: + result = 1 + result2 = 1 + + cutils.banner_log("TEST Starts for iteration - {}".format(count)) + + st.log("Multicast queue threshold config") + tfapi.config_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4, index=tf_data.index, + buffer_type='multicast', value=tf_data.threshold) + st.log("Multicast queue threshold config verify") + if not tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='multicast', + port_alias=vars.D1T1P4, mc0=tf_data.threshold): + st.error("Unable to configure multicast queue threshold value on multicast-queue buffer") + result = 0 + + st.log("Traffic start and stop") + tf_tg_traffic_start_stop(tf_data.multicast, tf_data.traffic_duration+7) + + st.log("Checking multicast queue breach event") + if not tfapi.verify_threshold_breaches(vars.D1, 
buffer='queue', port=vars.D1T1P4, index=tf_data.index, + threshold_type='multicast'): + st.error("Multicast queue threshold breach Event is not found") + if tf_data.need_debug_prints: + tf_collecting_debug_logs_when_test_fails('multicast', tf_data.traffic_duration+7, tf_data.multicast) + result = 0 + result2 = 0 + + st.log("Clear Multicast queue threshold breach") + tfapi.clear_threshold(vars.D1, breach='all') + + st.log("Checking multicast queue breach event") + if tfapi.verify_threshold_breaches(vars.D1, buffer='multicast', port=vars.D1T1P4, index=tf_data.index, + threshold_type='queue'): + st.error("Post clear - Multicast queue threshold breach Event is found") + result = 0 + + st.log("Multicast queue threshold config clear") + tfapi.clear_threshold(vars.D1, threshold_type='queue', port_alias=vars.D1T1P4, index=tf_data.index, + buffer_type='multicast') + st.log("Multicast queue threshold config verify") + if tfapi.verify_threshold(vars.D1, threshold_type='queue', buffer_type='multicast', + port_alias=vars.D1T1P4, mc0=tf_data.threshold): + st.error("Unable to configure multicast queue threshold value") + result = 0 + + if not result2 and tf_data.need_debug_prints : + st.log("As Breach events are not observed collecting logs by disabling the Thresholds") + tf_collecting_debug_logs_when_test_fails('multicast', tf_data.traffic_duration, tf_data.multicast) + + tfapi.clear_threshold(vars.D1, breach='all') + if result: + st.log("Test PASSED in Iteration {}.".format(count)) + report_result(result) + break + + if count == tf_data.test_max_retries_count: + st.log("Test Failed in all {} Iterations. Hence Declaring as FAIL".format(count)) + report_result(result) + + st.log("Test Failed in the Iteration-{}. 
Hence re-testing".format(count)) + count += 1 diff --git a/spytest/utilities/common.py b/spytest/utilities/common.py index f340c35daf8..40beff3fee4 100644 --- a/spytest/utilities/common.py +++ b/spytest/utilities/common.py @@ -134,10 +134,10 @@ def make_list2(*args): retval.append(arg) return retval -def is_iterable(obj): - try: _ = iter(obj) - except TypeError: return False - return True +def iterable(obj): + if obj is None or isinstance(obj, bool): + return [] + return obj def filter_and_select(output, select=None, match=None): """ @@ -175,10 +175,9 @@ def select_entry(ent, select): # collect the matched/all entries retval = [] - if is_iterable(output): - for ent in output: - if not match or match_entry(ent, match): - retval.append(ent) + for ent in iterable(output): + if not match or match_entry(ent, match): + retval.append(ent) # return all columns if select is not specified if not select: @@ -248,8 +247,11 @@ def random_vlan_list(count=1, exclude=[]): def get_proc_name(): return sys._getframe(1).f_code.co_name -def get_line_number(): +def get_line_number(lvl=0): cf = inspect.currentframe() + for _ in range(lvl): + if cf.f_back: + cf = cf.f_back return cf.f_back.f_lineno def trace(fmt, *args): @@ -379,17 +381,11 @@ def time_parse(timestr): return secs def time_format(value, msec=False): - if msec: - milli, value = value % 1000, value // 1000 - seconds = value % (24 * 3600) - hour = seconds // 3600 - seconds %= 3600 - minutes = seconds // 60 - seconds %= 60 - if msec: - retval = "%d:%02d:%02d.%03d" % (hour, minutes, seconds, milli) - else: - retval = "%d:%02d:%02d" % (hour, minutes, seconds) + if msec: milli, value = value % 1000, value // 1000 + minutes, seconds = divmod(value, 60) + hour, minutes = divmod(minutes, 60) + retval = "%d:%02d:%02d" % (hour, minutes, seconds) + if msec: retval = "%s.%03d" % (retval, milli) return retval def time_diff(start, end, fmt=False, add=0): @@ -585,6 +581,7 @@ def write_html_table3(cols, rows, filepath=None, links=None, 
colors=None, align= +