# --- file: ansible/roles/test/files/ptftests/fg_ecmp_test.py ---
#
# PTF test containing the test cases for fine grained ECMP; the test scenarios are as follows:
# create_flows: Sends NUM_FLOWS flows with varying src IPs and creates a tuple to port map
# initial_hash_check: Checks that the flows from create_flows still end up at the same port
# withdraw_nh: Withdraw next-hop in one fg nhg bank, and make sure flows redistribute to ports in the fg nhg bank
# add_nh: Add next-hop in one fg nhg bank, and make sure flows redistribute from ports in the same fg nhg bank to the added port
# withdraw_bank: Withdraw all next-hops which constitute a bank, and make sure that flows migrate to using the other bank
# add_first_nh: Add 1st next-hop from previously withdrawn bank, and make sure that some flows migrate back to using the next-hop in the old bank


import ipaddress
import json
import logging
import os
import random
import time

import ptf
import ptf.packet as scapy

from ptf.base_tests import BaseTest
from ptf.mask import Mask
import ptf.testutils as testutils
from ptf.testutils import *

# Each test case runs as a separate PTF invocation; the flow -> egress-port
# map discovered by 'create_flows' is persisted here for the later cases.
PERSIST_MAP = '/tmp/fg_ecmp_persist_map.json'


class FgEcmpTest(BaseTest):
    """Validates fine grained ECMP (FG NHG) hash distribution on the DUT."""

    def __init__(self):
        '''
        @summary: constructor
        '''
        BaseTest.__init__(self)
        self.test_params = test_params_get()

    #---------------------------------------------------------------------
    def log(self, message):
        logging.info(message)


    def trigger_mac_learning(self, ip_to_port):
        # Send one dummy L2 frame from every server-facing PTF port so the
        # DUT learns the PTF port MACs before the real test traffic starts.
        for src_ip, src_port in ip_to_port.items():
            pkt = simple_eth_packet(
                eth_dst=self.router_mac,
                eth_src=self.dataplane.get_mac(0, src_port),
                eth_type=0x1234)

            send_packet(self, src_port, pkt)


    def setUp(self):
        '''
        @summary: Setup for the test

        Reads the test-case name and per-case parameters from the PTF
        params, loads the JSON config file pushed by the pytest side, and
        primes DUT MAC learning.
        '''
        self.dataplane = ptf.dataplane_instance
        self.test_params = testutils.test_params_get()
        # Maximum tolerated relative deviation from the expected per-port
        # flow count when validating the hash distribution.
        self.max_deviation = 0.25

        if 'test_case' in self.test_params:
            self.test_case = self.test_params['test_case']
        else:
            raise Exception("Need a test case param")

        # Per-test-case parameters.
        if self.test_case == 'withdraw_nh':
            self.withdraw_nh_port = self.test_params['withdraw_nh_port']
        elif self.test_case == 'add_nh':
            self.add_nh_port = self.test_params['add_nh_port']
        elif self.test_case == 'withdraw_bank':
            self.withdraw_nh_bank = self.test_params['withdraw_nh_bank']
        elif self.test_case == 'add_first_nh':
            self.first_nh = self.test_params['first_nh']

        if 'config_file' not in self.test_params:
            raise Exception("required parameter 'config_file' is not present")
        config = self.test_params['config_file']

        if 'exp_flow_count' not in self.test_params:
            raise Exception("required parameter 'exp_flow_count' is not present")
        self.exp_flow_count = self.test_params['exp_flow_count']

        if not os.path.isfile(config):
            raise Exception("the config file %s doesn't exist" % config)

        with open(config) as fp:
            graph = json.load(fp)

        self.net_ports = graph['net_ports']
        self.exp_ports = graph['port_list']
        self.exp_port_set_one = graph['bank_0_port']
        self.exp_port_set_two = graph['bank_1_port']
        self.dst_ip = graph['dst_ip']
        self.router_mac = graph['dut_mac']
        self.ip_to_port = graph['ip_to_port']
        self.num_flows = graph['num_flows']
        self.inner_hashing = graph['inner_hashing']

        self.log(self.net_ports)
        self.log(self.exp_ports)
        self.log(self.exp_port_set_one)
        self.log(self.exp_port_set_two)
        self.log(self.dst_ip)
        self.log(self.router_mac)
        self.log(self.test_case)
        self.log(self.ip_to_port)
        self.log(self.num_flows)
        self.log(self.inner_hashing)
        self.log(self.exp_flow_count)

        self.trigger_mac_learning(self.ip_to_port)
        time.sleep(3)


    #---------------------------------------------------------------------
    def test_balancing(self, hit_count_map):
        # Verify every expected egress port received a flow count within
        # max_deviation of its expected share.
        for port, exp_flows in self.exp_flow_count.items():
            assert port in hit_count_map
            num_flows = hit_count_map[port]
            deviation = float(num_flows)/float(exp_flows)
            deviation = abs(1-deviation)
            self.log("port "+ str(port) + " exp_flows " + str(exp_flows) +
                    " num_flows " + str(num_flows) + " deviation " + str(deviation))
            assert deviation <= self.max_deviation


    def _choose_in_port(self):
        # With inner hashing the outer flow is sprayed over all network
        # ports; otherwise a single deterministic ingress port is used.
        if self.inner_hashing:
            return random.choice(self.net_ports)
        return self.net_ports[0]


    def _load_persist_map(self):
        # Load the flow -> port map produced by an earlier test case.
        with open(PERSIST_MAP) as fp:
            tuple_to_port_map = json.load(fp)
        assert tuple_to_port_map
        return tuple_to_port_map


    def _save_persist_map(self, tuple_to_port_map):
        # Persist the (possibly updated) flow -> port map for later cases.
        # Original code leaked the file handle via json.dump(..., open(...)).
        with open(PERSIST_MAP, "w") as fp:
            json.dump(tuple_to_port_map, fp)


    def fg_ecmp(self):
        # NOTE(review): dst_ip comes from JSON; .decode('utf8') assumes a
        # Python 2 byte string as elsewhere in this file.
        ipv4 = isinstance(ipaddress.ip_address(self.dst_ip.decode('utf8')),
                          ipaddress.IPv4Address)

        # Base address used to derive per-flow source IPs. The inner packet
        # is always IPv4 when inner hashing is enabled; otherwise the family
        # follows dst_ip. (Original recomputed the isinstance check here.)
        if self.inner_hashing or ipv4:
            base_ip = ipaddress.ip_address(u'8.0.0.0')
        else:
            base_ip = ipaddress.ip_address(u'20D0:A800:0:00::')

        # initialize all parameters
        if self.inner_hashing:
            dst_ip = '5.5.5.5'
        else:
            dst_ip = self.dst_ip
        src_port = 20000
        dst_port = 30000

        tuple_to_port_map = {}
        hit_count_map = {}

        if self.test_case == 'create_flows':
            # Send packets with varying src_ips to create NUM_FLOWS unique flows
            # and generate a flow to port map
            self.log("Creating flow to port map ...")
            for i in range(0, self.num_flows):
                src_ip = str(base_ip + i)
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
                tuple_to_port_map[src_ip] = port_idx
            self.test_balancing(hit_count_map)
            self._save_persist_map(tuple_to_port_map)
            return

        elif self.test_case == 'initial_hash_check':
            tuple_to_port_map = self._load_persist_map()
            # step 2: Send the same flows once again and verify that they end up on the same port
            self.log("Ensure that flow to port map is maintained when the same flow is re-sent...")
            for src_ip, port in tuple_to_port_map.items():
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                assert port_idx == port
            return

        elif self.test_case == 'withdraw_nh':
            self.log("Withdraw next-hop " + str(self.withdraw_nh_port) + " and ensure hash redistribution within correct bank")
            tuple_to_port_map = self._load_persist_map()
            if self.withdraw_nh_port in self.exp_port_set_one:
                withdraw_port_grp = self.exp_port_set_one
            else:
                withdraw_port_grp = self.exp_port_set_two
            hit_count_map = {}
            for src_ip, port in tuple_to_port_map.items():
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
                # No traffic may egress the withdrawn next-hop.
                assert port_idx != self.withdraw_nh_port
                if port == self.withdraw_nh_port:
                    # Displaced flows must land within the same bank.
                    assert (port_idx in withdraw_port_grp)
                    tuple_to_port_map[src_ip] = port_idx
                else:
                    assert port_idx == port

            self.test_balancing(hit_count_map)
            self._save_persist_map(tuple_to_port_map)
            return

        elif self.test_case == 'add_nh':
            self.log("Add next-hop " + str(self.add_nh_port) + " and ensure hash redistribution within correct bank")
            tuple_to_port_map = self._load_persist_map()
            if self.add_nh_port in self.exp_port_set_one:
                add_port_grp = self.exp_port_set_one
            else:
                add_port_grp = self.exp_port_set_two
            hit_count_map = {}
            for src_ip, port in tuple_to_port_map.items():
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
                if port_idx == self.add_nh_port:
                    # Flows may only migrate to the added port from its own bank.
                    # NOTE(review): the map entry is intentionally left at the
                    # old port here, matching the original behavior - confirm
                    # downstream cases rely on that.
                    assert (port in add_port_grp)
                else:
                    assert port_idx == port

            self.test_balancing(hit_count_map)
            self._save_persist_map(tuple_to_port_map)
            return

        elif self.test_case == 'withdraw_bank':
            self.log("Withdraw bank " + str(self.withdraw_nh_bank) + " and ensure hash redistribution is as expected")
            tuple_to_port_map = self._load_persist_map()
            if self.withdraw_nh_bank[0] in self.exp_port_set_one:
                active_port_grp = self.exp_port_set_two
            else:
                active_port_grp = self.exp_port_set_one
            hit_count_map = {}
            for src_ip, port in tuple_to_port_map.items():
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
                if port in self.withdraw_nh_bank:
                    # Flows from the withdrawn bank must move to the live bank.
                    assert (port_idx in active_port_grp)
                    tuple_to_port_map[src_ip] = port_idx
                else:
                    assert port_idx == port

            self.test_balancing(hit_count_map)
            self._save_persist_map(tuple_to_port_map)
            return

        elif self.test_case == 'add_first_nh':
            self.log("Add 1st next-hop " + str(self.first_nh) + " and ensure hash redistribution is as expected")
            tuple_to_port_map = self._load_persist_map()
            if self.first_nh in self.exp_port_set_one:
                active_port_grp = self.exp_port_set_two
            else:
                active_port_grp = self.exp_port_set_one

            hit_count_map = {}
            for src_ip, port in tuple_to_port_map.items():
                in_port = self._choose_in_port()
                (port_idx, _) = self.send_rcv_ip_pkt(
                    in_port, src_port, dst_port, src_ip, dst_ip, self.exp_ports, ipv4)
                hit_count_map[port_idx] = hit_count_map.get(port_idx, 0) + 1
                # A flow either stays on its port in the still-active bank,
                # or moves to the newly re-added first next-hop.
                flow_redistribution_in_correct_grp = False
                if port_idx in active_port_grp:
                    assert port_idx == port
                    flow_redistribution_in_correct_grp = True
                elif port_idx == self.first_nh:
                    flow_redistribution_in_correct_grp = True
                    tuple_to_port_map[src_ip] = port_idx
                assert flow_redistribution_in_correct_grp == True

            self.test_balancing(hit_count_map)
            return

        else:
            self.log("Unsupported testcase " + self.test_case)
            return


    def send_rcv_ip_pkt(self, in_port, sport, dport, src_ip_addr, dst_ip_addr,
                        dst_port_list, ipv4=True):
        """Send one flow packet and return (matched_port, received).

        Dispatches to the IPv4 or IPv6 sender and asserts the packet was
        seen on one of dst_port_list.
        """
        if ipv4:
            (matched_index, received) = self.send_rcv_ipv4_pkt(in_port, sport, dport,
                    src_ip_addr, dst_ip_addr, dst_port_list)
        else:
            (matched_index, received) = self.send_rcv_ipv6_pkt(in_port, sport, dport,
                    src_ip_addr, dst_ip_addr, dst_port_list)

        assert received

        matched_port = dst_port_list[matched_index]
        logging.info("Received packet at " + str(matched_port))

        return (matched_port, received)


    def send_rcv_ipv4_pkt(self, in_port, sport, dport,
                          ip_src, ip_dst, dst_port_list):
        # Build a TCP flow packet; when inner hashing is enabled, wrap it in
        # a VxLAN outer header with randomized outer fields.
        src_mac = self.dataplane.get_mac(0, in_port)
        rand_int = random.randint(1, 254)

        pkt = simple_tcp_packet(
                eth_dst=self.router_mac,
                eth_src=src_mac,
                ip_src=ip_src,
                ip_dst=ip_dst,
                tcp_sport=sport,
                tcp_dport=dport,
                ip_ttl=64)
        if self.inner_hashing:
            pkt = simple_vxlan_packet(
                eth_dst=self.router_mac,
                eth_src=src_mac,
                ip_id=0,
                ip_src='2.2.2.' + str(rand_int),
                ip_dst=self.dst_ip,
                ip_ttl=64,
                udp_sport=rand_int,
                udp_dport=4789,
                vxlan_vni=rand_int,
                with_udp_chksum=False,
                inner_frame=pkt)

        send_packet(self, in_port, pkt)

        # Fields rewritten by the router are masked out of the match.
        masked_exp_pkt = Mask(pkt)
        masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
        masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
        masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
        masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")

        return verify_packet_any_port(self, masked_exp_pkt, dst_port_list)


    def send_rcv_ipv6_pkt(self, in_port, sport, dport,
                          ip_src, ip_dst, dst_port_list):
        # IPv6 variant: either a plain TCPv6 packet, or (inner hashing) an
        # IPv4 inner frame carried in a VxLANv6 outer packet.
        src_mac = self.dataplane.get_mac(0, in_port)
        rand_int = random.randint(1, 254)

        if self.inner_hashing:
            pkt = simple_tcp_packet(
                    eth_dst=self.router_mac,
                    eth_src=src_mac,
                    ip_src=ip_src,
                    ip_dst=ip_dst,
                    tcp_sport=sport,
                    tcp_dport=dport,
                    ip_ttl=64)
            pkt = simple_vxlanv6_packet(
                    eth_dst=self.router_mac,
                    eth_src=src_mac,
                    ipv6_src='2:2:2::' + str(rand_int),
                    ipv6_dst=self.dst_ip,
                    udp_sport=rand_int,
                    udp_dport=4789,
                    vxlan_vni=rand_int,
                    with_udp_chksum=False,
                    inner_frame=pkt)
        else:
            pkt = simple_tcpv6_packet(
                    eth_dst=self.router_mac,
                    eth_src=src_mac,
                    ipv6_dst=ip_dst,
                    ipv6_src=ip_src,
                    tcp_sport=sport,
                    tcp_dport=dport,
                    ipv6_hlim=64)

        send_packet(self, in_port, pkt)

        masked_exp_pkt = Mask(pkt)
        masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
        masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
        masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6, "hlim")

        return verify_packet_any_port(self, masked_exp_pkt, dst_port_list)


    #---------------------------------------------------------------------
    def runTest(self):
        # Main function which triggers all the tests
        self.fg_ecmp()
# --- file: tests/ecmp/test_fgnhg.py ---

import pytest
from datetime import datetime

import time
import logging
import ipaddress
import json
from tests.ptf_runner import ptf_runner
from tests.common import config_reload

from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import]

# Constants
NUM_NHs = 8                      # number of next-hops in the FG NHG
DEFAULT_VLAN_ID = 1000           # overwritten with the DUT's actual VLAN id in configure_interfaces()
DEFAULT_VLAN_IPv4 = ipaddress.ip_network(u'200.200.200.0/28')
DEFAULT_VLAN_IPv6 = ipaddress.ip_network(u'200:200:200:200::/124')
PREFIX_IPv4 = u'100.50.25.12/32'
PREFIX_IPv6 = u'fc:05::/128'
ARP_CFG = '/tmp/arp_cfg.json'
FG_ECMP_CFG = '/tmp/fg_ecmp.json'
USE_INNER_HASHING = False
NUM_FLOWS = 1000

SUPPORTED_TOPO = ['t0']
SUPPORTED_PLATFORMS = ['mellanox']

logger = logging.getLogger(__name__)


def configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip):
    """Select NUM_NHs VLAN member ports, split them into two equal banks,
    create the VLAN IP interface on the DUT, and map one next-hop IP from
    vlan_ip to each selected PTF port.

    Returns (port_list, ip_to_port, bank_0_port, bank_1_port).
    """
    # BUGFIX: the original assigned DEFAULT_VLAN_ID without 'global', so
    # setup_neighbors() still saw the stale module-level value of 1000.
    global DEFAULT_VLAN_ID

    config_port_indices = cfg_facts['port_index_map']
    port_list = []
    eth_port_list = []
    ip_to_port = {}

    vlan_members = cfg_facts.get('VLAN_MEMBER', {})
    logger.info("VLAN members: %s", vlan_members)   # was a leftover py2 debug print
    for vlan in cfg_facts['VLAN_MEMBER'].keys():
        vlan_id = vlan[4:]                          # strip the leading 'Vlan'
        DEFAULT_VLAN_ID = int(vlan_id)
        if len(port_list) == NUM_NHs:
            break
        for port in vlan_members[vlan]:
            if len(port_list) == NUM_NHs:
                break
            ptf_port_id = config_port_indices[port]
            port_list.append(ptf_port_id)
            eth_port_list.append(port)

    port_list.sort()
    # Lower half of the sorted ports is bank 0, upper half is bank 1.
    # '//' keeps integer semantics on both Python 2 and 3.
    bank_0_port = port_list[:len(port_list) // 2]
    bank_1_port = port_list[len(port_list) // 2:]

    # Create vlan if
    duthost.command('config interface ip add Vlan' + str(DEFAULT_VLAN_ID) + ' ' + str(vlan_ip))

    for index, ip in enumerate(vlan_ip.hosts()):
        if len(ip_to_port) == NUM_NHs:
            break
        ip_to_port[str(ip)] = port_list[index]

    return port_list, ip_to_port, bank_0_port, bank_1_port


def generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix):
    """Build the FG_NHG / FG_NHG_MEMBER / FG_NHG_PREFIX config and push it
    to the DUT's config DB via sonic-cfggen."""
    if isinstance(ipaddress.ip_network(prefix), ipaddress.IPv4Network):
        fgnhg_name = 'fgnhg_v4'
    else:
        fgnhg_name = 'fgnhg_v6'

    fgnhg_data = {}

    fgnhg_data['FG_NHG'] = {}
    fgnhg_data['FG_NHG'][fgnhg_name] = {
        "bucket_size": 125
    }

    fgnhg_data['FG_NHG_MEMBER'] = {}
    for ip, port in ip_to_port.items():
        bank = "0"
        if port in bank_1_port:
            bank = "1"
        fgnhg_data['FG_NHG_MEMBER'][ip] = {
            "bank": bank,
            "FG_NHG": fgnhg_name
        }

    fgnhg_data['FG_NHG_PREFIX'] = {}
    fgnhg_data['FG_NHG_PREFIX'][prefix] = {
        "FG_NHG": fgnhg_name
    }

    logger.info("fgnhg entries programmed to DUT " + str(fgnhg_data))
    duthost.copy(content=json.dumps(fgnhg_data, indent=2), dest="/tmp/fgnhg.json")
    duthost.shell("sonic-cfggen -j /tmp/fgnhg.json --write-to-db")


def setup_neighbors(duthost, ptfhost, ip_to_port):
    """Program static NEIGH entries on the DUT VLAN, pointing each next-hop
    IP at the MAC of its assigned PTF port."""
    vlan_name = "Vlan" + str(DEFAULT_VLAN_ID)
    neigh_entries = {}
    neigh_entries['NEIGH'] = {}

    for ip, port in ip_to_port.items():
        # Both families share the same entry shape; only "family" differs.
        if isinstance(ipaddress.ip_address(ip.decode('utf8')), ipaddress.IPv4Address):
            family = "IPv4"
        else:
            family = "IPv6"
        neigh_entries['NEIGH'][vlan_name + "|" + ip] = {
            "neigh": ptfhost.shell("cat /sys/class/net/eth" + str(port) + "/address")["stdout_lines"][0],
            "family": family
        }

    logger.info("neigh entries programmed to DUT " + str(neigh_entries))
    duthost.copy(content=json.dumps(neigh_entries, indent=2), dest="/tmp/neigh.json")
    duthost.shell("sonic-cfggen -j /tmp/neigh.json --write-to-db")


def create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports, prefix):
    """Write the JSON config consumed by the fg_ecmp_test PTF script."""
    fg_ecmp = {
        "ip_to_port": ip_to_port,
        "port_list": port_list,
        "bank_0_port": bank_0_port,
        "bank_1_port": bank_1_port,
        "dut_mac": router_mac,
        "dst_ip": prefix.split('/')[0],
        "net_ports": net_ports,
        "inner_hashing": USE_INNER_HASHING,
        "num_flows": NUM_FLOWS
    }

    logger.info("fg_ecmp config sent to PTF: " + str(fg_ecmp))
    ptfhost.copy(content=json.dumps(fg_ecmp, indent=2), dest=FG_ECMP_CFG)


def setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, vlan_ip, prefix):
    """End-to-end setup: interfaces, FG NHG config, neighbors, PTF config."""
    port_list, ip_to_port, bank_0_port, bank_1_port = configure_interfaces(cfg_facts, duthost, ptfhost, ptfadapter, vlan_ip)
    generate_fgnhg_config(duthost, ip_to_port, bank_0_port, bank_1_port, prefix)
    # Allow the FG NHG config to propagate through orchagent before
    # programming neighbors/routes on top of it.
    time.sleep(60)
    setup_neighbors(duthost, ptfhost, ip_to_port)
    create_fg_ptf_config(ptfhost, ip_to_port, port_list, bank_0_port, bank_1_port, router_mac, net_ports, prefix)
    return port_list, ip_to_port, bank_0_port, bank_1_port


def fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, prefix):
    """Drive the full PTF test-case sequence for one prefix family:
    create_flows -> initial_hash_check -> withdraw_nh -> add_nh ->
    withdraw_bank -> add_first_nh, programming routes via vtysh between
    each step and passing the expected per-port flow counts to PTF."""
    if isinstance(ipaddress.ip_network(prefix), ipaddress.IPv4Network):
        ipcmd = "ip route"
    else:
        ipcmd = "ipv6 route"

    # Install a static route to the prefix via every next-hop.
    for nexthop in ip_to_port:
        duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop))
    time.sleep(1)

    test_time = str(datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))

    # --- create_flows: uniform distribution over all ports expected ---
    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.create_flows.log".format(test_time)

    exp_flow_count = {}
    flows_per_nh = NUM_FLOWS // len(port_list)
    for port in port_list:
        exp_flow_count[port] = flows_per_nh

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'create_flows',
                    "exp_flow_count": exp_flow_count,
                    "config_file": FG_ECMP_CFG},
            qlen=1000,
            log_file=log_file)

    # --- initial_hash_check: same flows must hit the same ports ---
    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.initial_hash_check.log".format(test_time)

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'initial_hash_check',
                    "exp_flow_count": exp_flow_count,
                    "config_file": FG_ECMP_CFG},
            qlen=1000,
            log_file=log_file)

    # --- withdraw_nh: remove one next-hop from bank 0; its bank absorbs
    # the displaced flows while bank 1 is untouched ---
    exp_flow_count = {}
    flows_for_withdrawn_nh_bank = (NUM_FLOWS // 2) // (len(bank_0_port) - 1)
    withdraw_nh_port = bank_0_port[1]
    for port in bank_1_port:
        exp_flow_count[port] = flows_per_nh
    for port in bank_0_port:
        if port != withdraw_nh_port:
            exp_flow_count[port] = flows_for_withdrawn_nh_bank

    for nexthop, port in ip_to_port.items():
        if port == withdraw_nh_port:
            duthost.shell("vtysh -c 'configure terminal' -c 'no {} {} {}'".format(ipcmd, prefix, nexthop))

    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.withdraw_nh.log".format(test_time)

    time.sleep(1)

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'withdraw_nh',
                    "config_file": FG_ECMP_CFG,
                    "exp_flow_count": exp_flow_count,
                    "withdraw_nh_port": withdraw_nh_port},
            qlen=1000,
            log_file=log_file)

    # --- add_nh: re-add the withdrawn next-hop; distribution returns to
    # uniform over all ports ---
    exp_flow_count = {}
    for port in port_list:
        exp_flow_count[port] = flows_per_nh

    for nexthop, port in ip_to_port.items():
        if port == withdraw_nh_port:
            duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop))

    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.add_nh.{}.log".format(test_time)

    time.sleep(1)

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'add_nh',
                    "config_file": FG_ECMP_CFG,
                    "exp_flow_count": exp_flow_count,
                    "add_nh_port": withdraw_nh_port},
            qlen=1000,
            log_file=log_file)

    # --- withdraw_bank: remove every bank-0 next-hop; all flows move to
    # bank 1 ---
    withdraw_nh_bank = bank_0_port
    for nexthop, port in ip_to_port.items():
        if port in withdraw_nh_bank:
            duthost.shell("vtysh -c 'configure terminal' -c 'no {} {} {}'".format(ipcmd, prefix, nexthop))

    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.withdraw_bank.log".format(test_time)

    time.sleep(1)

    exp_flow_count = {}
    flows_per_nh = NUM_FLOWS // len(bank_1_port)
    for port in bank_1_port:
        exp_flow_count[port] = flows_per_nh

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'withdraw_bank',
                    "config_file": FG_ECMP_CFG,
                    "exp_flow_count": exp_flow_count,
                    "withdraw_nh_bank": withdraw_nh_bank},
            qlen=1000,
            log_file=log_file)

    # --- add_first_nh: re-add a single bank-0 next-hop; it takes half the
    # flows, the other half stays spread over bank 1 ---
    first_nh = bank_0_port[3]
    for nexthop, port in ip_to_port.items():
        if port == first_nh:
            duthost.shell("vtysh -c 'configure terminal' -c '{} {} {}'".format(ipcmd, prefix, nexthop))

    log_file = "/tmp/fg_ecmp_test.FgEcmpTest.{}.add_first_nh.log".format(test_time)

    time.sleep(1)

    exp_flow_count = {}
    flows_per_nh = (NUM_FLOWS // 2) // (len(bank_1_port))
    for port in bank_1_port:
        exp_flow_count[port] = flows_per_nh

    exp_flow_count[first_nh] = NUM_FLOWS // 2

    ptf_runner(ptfhost,
            "ptftests",
            "fg_ecmp_test.FgEcmpTest",
            platform_dir="ptftests",
            params={"test_case": 'add_first_nh',
                    "config_file": FG_ECMP_CFG,
                    "exp_flow_count": exp_flow_count,
                    "first_nh": first_nh},
            qlen=1000,
            log_file=log_file)


def cleanup(duthost):
    # Restore the DUT to its persisted configuration after the test.
    config_reload(duthost)


@pytest.fixture(scope="module")
def common_setup_teardown(tbinfo, duthost):
    """Module fixture: validate topo/platform support, gather facts and the
    set of network-facing (portchannel member) PTF ports; config-reload the
    DUT on teardown."""
    if tbinfo['topo']['name'] not in SUPPORTED_TOPO:
        logger.warning("Unsupported topology, currently supports " + str(SUPPORTED_TOPO))
        pytest.skip("Unsupported topology")
    if duthost.facts["asic_type"] not in SUPPORTED_PLATFORMS:
        logger.warning("Unsupported platform, currently supports " + str(SUPPORTED_PLATFORMS))
        pytest.skip("Unsupported platform")

    try:
        host_facts = duthost.setup()['ansible_facts']
        mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
        cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
        router_mac = host_facts['ansible_Ethernet0']['macaddress']
        net_ports = []
        for name, val in mg_facts['minigraph_portchannels'].items():
            members = [mg_facts['minigraph_port_indices'][member] for member in val['members']]
            net_ports.extend(members)
        yield duthost, cfg_facts, router_mac, net_ports

    finally:
        cleanup(duthost)


def test_fg_ecmp(common_setup_teardown, ptfadapter, ptfhost):
    duthost, cfg_facts, router_mac, net_ports = common_setup_teardown

    # IPv4 test
    port_list, ip_to_port, bank_0_port, bank_1_port = setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv4, PREFIX_IPv4)
    fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, PREFIX_IPv4)

    # IPv6 test
    port_list, ip_to_port, bank_0_port, bank_1_port = setup_test_config(ptfadapter, duthost, ptfhost, cfg_facts, router_mac, net_ports, DEFAULT_VLAN_IPv6, PREFIX_IPv6)
    fg_ecmp(ptfhost, duthost, router_mac, net_ports, port_list, ip_to_port, bank_0_port, bank_1_port, PREFIX_IPv6)