diff --git a/ansible/library/exabgp.py b/ansible/library/exabgp.py index bcb0ceb00da..e8603bccffa 100644 --- a/ansible/library/exabgp.py +++ b/ansible/library/exabgp.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python import re diff --git a/ansible/roles/test/files/ptftests/IP_decap_test.py b/ansible/roles/test/files/ptftests/IP_decap_test.py index 9ce87cd8b0e..7cf4c882e6b 100644 --- a/ansible/roles/test/files/ptftests/IP_decap_test.py +++ b/ansible/roles/test/files/ptftests/IP_decap_test.py @@ -345,7 +345,20 @@ def run_encap_combination_test(self, outer_pkt_type, inner_pkt_type): else: raise Exception('ERROR: Invalid inner packet type passed: ', inner_pkt_type) - for ip_range in ip_ranges: + ip_ranges_length = len(ip_ranges) + if ip_ranges_length > 150: + # This is to limit the test execution time. Because the IP ranges in the head and tail of the list are + # kind of special. We need to always cover them. The IP ranges in the middle are not fundamentally + # different. We can just sample some IP ranges in the middle. Using this method, test coverage is not + # compromized. Test execution time can be reduced from over 5000 seconds to around 300 seconds. 
#!/usr/bin/env python

import math
import os
import re

import requests
import yaml

from ansible.module_utils.basic import AnsibleModule

DOCUMENTATION = '''
module: announce_routes
short_description: announce routes to exabgp processes running in PTF container
description: Announce routes to exabgp processes running in PTF container. This module must be executed on localhost
    which is the sonic-mgmt container.

Options:
    - option-name: topo_name
      description: topology name
      required: True

    - option-name: ptf_ip
      description: PTF container management IP address
      required: True
'''

EXAMPLES = '''
  - name: Announce routes
    announce_routes:
      topo_name: "t1-lag"
      ptf_ip: "192.168.1.10"
    delegate_to: localhost
'''

TOPO_FILE_FOLDER = 'vars/'
TOPO_FILENAME_TEMPLATE = 'topo_{}.yml'

# Defaults used when the topology file does not override them in
# configuration_properties.common.
PODSET_NUMBER = 200
TOR_NUMBER = 16
TOR_SUBNET_NUMBER = 2
MAX_TOR_SUBNET_NUMBER = 16
TOR_SUBNET_SIZE = 128
NHIPV4 = '10.10.246.254'
NHIPV6 = 'fc0a::ff'
SPINE_ASN = 65534
LEAF_ASN_START = 64600
TOR_ASN_START = 65500
IPV4_BASE_PORT = 5000
IPV6_BASE_PORT = 6000


def get_topo_type(topo_name):
    """Return the canonical topology type for a topology name.

    Args:
        topo_name (str): Topology name, e.g. "t1-lag" or "dualtor-56".

    Returns:
        str: One of "t0", "t1", "ptf", "fullmesh".

    Raises:
        Exception: If the topology name does not start with a supported type.
    """
    pattern = re.compile(r'^(t0|t1|ptf|fullmesh|dualtor)')
    match = pattern.match(topo_name)
    if not match:
        raise Exception("Unsupported testbed type - {}".format(topo_name))
    topo_type = match.group()
    if 'dualtor' in topo_type:
        # Dualtor topologies announce routes exactly like t0, so map them to 't0' here
        # to avoid special-casing 'dualtor' in every consumer.
        topo_type = 't0'
    return topo_type


def read_topo(topo_name):
    """Load the topology definition yaml file.

    Args:
        topo_name (str): Topology name, used to build the vars/topo_<name>.yml path.

    Returns:
        dict: Parsed topology definition, or an empty dict if the file cannot be read.
    """
    topo_file_path = os.path.join(TOPO_FILE_FOLDER, TOPO_FILENAME_TEMPLATE.format(topo_name))
    try:
        with open(topo_file_path) as f:
            return yaml.safe_load(f)
    except IOError:
        return {}


def announce_routes(ptf_ip, port, routes):
    """POST route announce commands to an exabgp HTTP API endpoint on the PTF.

    Args:
        ptf_ip (str): PTF management IP address.
        port (int): TCP port of the exabgp HTTP API process.
        routes (list): List of (prefix, nexthop, aspath) tuples; aspath may be None.

    Raises:
        Exception: If the HTTP POST does not return status code 200.
    """
    messages = []
    for prefix, nexthop, aspath in routes:
        if aspath:
            messages.append("announce route {} next-hop {} as-path [ {} ]".format(prefix, nexthop, aspath))
        else:
            messages.append("announce route {} next-hop {}".format(prefix, nexthop))

    url = "http://%s:%d" % (ptf_ip, port)
    data = {"commands": ";".join(messages)}
    r = requests.post(url, data=data)
    # Raise instead of assert: assert statements are stripped when python runs with -O,
    # which would silently hide announcement failures.
    if r.status_code != 200:
        raise Exception("Announcing routes to {} failed with status code {}".format(url, r.status_code))


def generate_routes(family, podset_number, tor_number, tor_subnet_number,
                    spine_asn, leaf_asn_start, tor_asn_start,
                    nexthop, nexthop_v6,
                    tor_subnet_size, max_tor_subnet_number,
                    router_type="leaf", tor_index=None):
    """Generate the simulated route set a neighbor of the given role would announce.

    Args:
        family (str): "v4", "v6" or "both" - address families to generate.
        podset_number (int): Number of podsets in the simulated network.
        tor_number (int): Number of ToRs per podset.
        tor_subnet_number (int): Number of subnets per ToR.
        spine_asn (int): ASN of the simulated spine layer (may be None for t1).
        leaf_asn_start (int): First leaf ASN; podset index is added to it.
        tor_asn_start (int): First ToR ASN; tor index is added to it.
        nexthop (str): IPv4 nexthop for announced routes.
        nexthop_v6 (str): IPv6 nexthop for announced routes.
        tor_subnet_size (int): Size of each ToR subnet (power of two).
        max_tor_subnet_number (int): Max subnets per ToR, used for address spacing.
        router_type (str): Role of the announcing router: "spine", "leaf" or "tor".
        tor_index (int): For router_type "tor", which ToR this router is.

    Returns:
        list: List of (prefix, nexthop, aspath) tuples; aspath is None for local routes.
    """
    routes = []

    default_route_as_path = "6666 6667"
    if router_type == "leaf":
        default_route_as_path = "{} {}".format(spine_asn, default_route_as_path)

    # ToR ("t0") neighbors do not announce a default route.
    if router_type != 'tor':
        if family in ["v4", "both"]:
            routes.append(("0.0.0.0/0", nexthop, default_route_as_path))
        if family in ["v6", "both"]:
            routes.append(("::/0", nexthop_v6, default_route_as_path))

    # NOTE: Large enough parameter values (e.g. podset_number = 200) can overflow the
    # 192.168.0.0/16 private address space here. This is fine for internal testbed use,
    # but may pose an issue if the generated prefixes are used otherwise.
    for podset in range(0, podset_number):
        for tor in range(0, tor_number):
            for subnet in range(0, tor_subnet_number):
                if router_type == "spine":
                    # Skip podset 0 for T2
                    if podset == 0:
                        continue
                elif router_type == "leaf":
                    # Skip tor 0 podset 0 for T1
                    if podset == 0 and tor == 0:
                        continue
                elif router_type == "tor":
                    # Skip non podset 0 for T0
                    if podset != 0:
                        continue
                    elif tor != tor_index:
                        continue

                suffix = ((podset * tor_number * max_tor_subnet_number * tor_subnet_size) +
                          (tor * max_tor_subnet_number * tor_subnet_size) +
                          (subnet * tor_subnet_size))
                # Use floor division ('//') so octets stay integers on Python 3 as well;
                # true division would yield floats and break both the dotted-quad
                # format below and the '%02X' formatting for the IPv6 prefix.
                octet2 = 168 + suffix // (256 ** 2)
                octet1 = 192 + octet2 // 256
                octet2 = octet2 % 256
                octet3 = (suffix // 256) % 256
                octet4 = suffix % 256
                prefixlen_v4 = 32 - int(math.log(tor_subnet_size, 2))

                prefix = "{}.{}.{}.{}/{}".format(octet1, octet2, octet3, octet4, prefixlen_v4)
                prefix_v6 = "20%02X:%02X%02X:0:%02X::/64" % (octet1, octet2, octet3, octet4)

                leaf_asn = leaf_asn_start + podset
                tor_asn = tor_asn_start + tor

                aspath = None
                if router_type == "spine":
                    aspath = "{} {}".format(leaf_asn, tor_asn)
                elif router_type == "leaf":
                    if podset == 0:
                        # Podset 0 ToRs hang off this leaf directly; only the ToR ASN.
                        aspath = "{}".format(tor_asn)
                    else:
                        aspath = "{} {} {}".format(spine_asn, leaf_asn, tor_asn)

                if family in ["v4", "both"]:
                    routes.append((prefix, nexthop, aspath))
                if family in ["v6", "both"]:
                    routes.append((prefix_v6, nexthop_v6, aspath))

    return routes


def fib_t0(topo, ptf_ip):
    """Announce routes to the exabgp processes of every VM neighbor of a t0 topology.

    Args:
        topo (dict): Parsed topology definition.
        ptf_ip (str): PTF management IP address.
    """
    common_config = topo['configuration_properties'].get('common', {})
    podset_number = common_config.get("podset_number", PODSET_NUMBER)
    tor_number = common_config.get("tor_number", TOR_NUMBER)
    tor_subnet_number = common_config.get("tor_subnet_number", TOR_SUBNET_NUMBER)
    max_tor_subnet_number = common_config.get("max_tor_subnet_number", MAX_TOR_SUBNET_NUMBER)
    tor_subnet_size = common_config.get("tor_subnet_size", TOR_SUBNET_SIZE)
    nhipv4 = common_config.get("nhipv4", NHIPV4)
    nhipv6 = common_config.get("nhipv6", NHIPV6)
    spine_asn = common_config.get("spine_asn", SPINE_ASN)
    leaf_asn_start = common_config.get("leaf_asn_start", LEAF_ASN_START)
    tor_asn_start = common_config.get("tor_asn_start", TOR_ASN_START)

    vms = topo['topology']['VMs']
    for vm in vms.values():
        vm_offset = vm['vm_offset']
        port = IPV4_BASE_PORT + vm_offset
        port6 = IPV6_BASE_PORT + vm_offset

        routes_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number,
                                    spine_asn, leaf_asn_start, tor_asn_start,
                                    nhipv4, nhipv4, tor_subnet_size, max_tor_subnet_number)
        routes_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number,
                                    spine_asn, leaf_asn_start, tor_asn_start,
                                    nhipv6, nhipv6, tor_subnet_size, max_tor_subnet_number)

        announce_routes(ptf_ip, port, routes_v4)
        announce_routes(ptf_ip, port6, routes_v6)


def fib_t1_lag(topo, ptf_ip):
    """Announce role-specific routes (spine/tor) to the exabgp processes of a t1 topology.

    Args:
        topo (dict): Parsed topology definition.
        ptf_ip (str): PTF management IP address.
    """
    common_config = topo['configuration_properties'].get('common', {})
    podset_number = common_config.get("podset_number", PODSET_NUMBER)
    tor_number = common_config.get("tor_number", TOR_NUMBER)
    tor_subnet_number = common_config.get("tor_subnet_number", TOR_SUBNET_NUMBER)
    max_tor_subnet_number = common_config.get("max_tor_subnet_number", MAX_TOR_SUBNET_NUMBER)
    tor_subnet_size = common_config.get("tor_subnet_size", TOR_SUBNET_SIZE)
    nhipv4 = common_config.get("nhipv4", NHIPV4)
    nhipv6 = common_config.get("nhipv6", NHIPV6)
    leaf_asn_start = common_config.get("leaf_asn_start", LEAF_ASN_START)
    tor_asn_start = common_config.get("tor_asn_start", TOR_ASN_START)

    vms = topo['topology']['VMs']
    vms_config = topo['configuration']

    for k, v in vms_config.items():
        vm_offset = vms[k]['vm_offset']
        port = IPV4_BASE_PORT + vm_offset
        port6 = IPV6_BASE_PORT + vm_offset

        router_type = None
        if 'spine' in v['properties']:
            router_type = 'spine'
        elif 'tor' in v['properties']:
            router_type = 'tor'
        tornum = v.get('tornum', None)
        tor_index = tornum - 1 if tornum is not None else None
        if router_type:
            routes_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number,
                                        None, leaf_asn_start, tor_asn_start,
                                        nhipv4, nhipv6, tor_subnet_size, max_tor_subnet_number,
                                        router_type=router_type, tor_index=tor_index)
            routes_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number,
                                        None, leaf_asn_start, tor_asn_start,
                                        nhipv4, nhipv6, tor_subnet_size, max_tor_subnet_number,
                                        router_type=router_type, tor_index=tor_index)
            announce_routes(ptf_ip, port, routes_v4)
            announce_routes(ptf_ip, port6, routes_v6)

        if 'vips' in v:
            routes_vips = []
            for prefix in v["vips"]["ipv4"]["prefixes"]:
                routes_vips.append((prefix, nhipv4, v["vips"]["ipv4"]["asn"]))
            announce_routes(ptf_ip, port, routes_vips)


def main():
    """Ansible module entry point: load topology and announce routes accordingly."""
    module = AnsibleModule(
        argument_spec=dict(
            topo_name=dict(required=True, type='str'),
            ptf_ip=dict(required=True, type='str')
        ),
        supports_check_mode=False)

    topo_name = module.params['topo_name']
    ptf_ip = module.params['ptf_ip']

    topo = read_topo(topo_name)
    if not topo:
        module.fail_json(msg='Unable to load topology "{}"'.format(topo_name))

    topo_type = get_topo_type(topo_name)

    if topo_type == "t0":
        fib_t0(topo, ptf_ip)
        module.exit_json(changed=True)
    elif topo_type == "t1":
        fib_t1_lag(topo, ptf_ip)
        module.exit_json(changed=True)
    else:
        module.fail_json(msg='Unsupported topology "{}"'.format(topo_name))


if __name__ == '__main__':
    main()
include_tasks: control_mux_simulator.yml vars: diff --git a/ansible/roles/vm_set/tasks/announce_routes.yml b/ansible/roles/vm_set/tasks/announce_routes.yml new file mode 100644 index 00000000000..067347ff8e6 --- /dev/null +++ b/ansible/roles/vm_set/tasks/announce_routes.yml @@ -0,0 +1,86 @@ +--- +- name: Include variables for PTF containers + include_vars: + dir: "{{ playbook_dir }}/group_vars/ptf_host/" + +- name: Set ptf host + set_fact: + ptf_host: "ptf_{{ vm_set_name }}" + ptf_host_ip: "{{ ptf_ip.split('/')[0] }}" + +- name: Add ptf host + add_host: + hostname: "{{ ptf_host }}" + ansible_user: "{{ ptf_host_user }}" + ansible_ssh_host: "{{ ptf_host_ip }}" + ansible_ssh_pass: "{{ ptf_host_pass }}" + ansible_python_interpreter: "/usr/bin/python" + groups: + - ptf_host + +- name: Set facts + set_fact: + ptf_local_ipv4: "{{ configuration_properties.common.nhipv4|default('10.10.246.254') }}" + ptf_local_ipv6: "{{ configuration_properties.common.nhipv6|default('fc0a::ff') }}" + +- name: Start exabgp processes for IPv4 on PTF + exabgp: + name: "{{ vm_item.key }}" + state: "started" + router_id: "{{ ptf_local_ipv4 }}" + local_ip: "{{ ptf_local_ipv4 }}" + peer_ip: "{{ configuration[vm_item.key].bp_interface.ipv4.split('/')[0] }}" + local_asn: "{{ configuration[vm_item.key].bgp.asn }}" + peer_asn: "{{ configuration[vm_item.key].bgp.asn }}" + port: "{{ 5000 + vm_item.value.vm_offset|int }}" + async: 300 + poll: 0 + loop: "{{ topology['VMs']|dict2items }}" + loop_control: + loop_var: vm_item + delegate_to: "{{ ptf_host }}" + +- name: Start exabgp processes for IPv6 on PTF + exabgp: + name: "{{ vm_item.key }}-v6" + state: "started" + router_id: "{{ ptf_local_ipv4 }}" + local_ip: "{{ ptf_local_ipv6 }}" + peer_ip: "{{ configuration[vm_item.key].bp_interface.ipv6.split('/')[0] }}" + local_asn: "{{ configuration[vm_item.key].bgp.asn }}" + peer_asn: "{{ configuration[vm_item.key].bgp.asn }}" + port: "{{ 6000 + vm_item.value.vm_offset|int }}" + async: 300 + poll: 0 + loop: "{{ 
topology['VMs']|dict2items }}" + loop_control: + loop_var: vm_item + delegate_to: "{{ ptf_host }}" + +- name: Verify that exabgp processes for IPv4 are started + wait_for: + host: "{{ ptf_host_ip }}" + port: "{{ 5000 + topology.VMs[vm_item.key].vm_offset|int }}" + state: "started" + timeout: 180 + loop: "{{ topology['VMs']|dict2items }}" + loop_control: + loop_var: vm_item + delegate_to: localhost + +- name: Verify that exabgp processes for IPv6 are started + wait_for: + host: "{{ ptf_host_ip }}" + port: "{{ 6000 + topology.VMs[vm_item.key].vm_offset|int }}" + state: "started" + timeout: 180 + loop: "{{ topology['VMs']|dict2items }}" + loop_control: + loop_var: vm_item + delegate_to: localhost + +- name: Announce routes + announce_routes: + topo_name: "{{ topo }}" + ptf_ip: "{{ ptf_host_ip }}" + delegate_to: localhost diff --git a/tests/common/plugins/fib.py b/tests/common/plugins/fib.py deleted file mode 100644 index ecefdfb55ed..00000000000 --- a/tests/common/plugins/fib.py +++ /dev/null @@ -1,266 +0,0 @@ -import sys -import time -import math -import requests -import pytest -import logging -import ipaddr as ipaddress -from tests.common.utilities import wait_tcp_connection - -logger = logging.getLogger(__name__) - - -def announce_routes(ptfip, port, routes): - messages = [] - for prefix, nexthop, aspath in routes: - if aspath: - messages.append("announce route {} next-hop {} as-path [ {} ]".format(prefix, nexthop, aspath)) - else: - messages.append("announce route {} next-hop {}".format(prefix, nexthop)) - - url = "http://%s:%d" % (ptfip, port) - data = { "commands": ";".join(messages) } - r = requests.post(url, data=data) - assert r.status_code == 200 - - -def generate_routes(family, podset_number, tor_number, tor_subnet_number, - spine_asn, leaf_asn_start, tor_asn_start, - nexthop, nexthop_v6, - tor_subnet_size, max_tor_subnet_number, - router_type = "leaf", tor_index=None): - routes = [] - - default_route_as_path = "6666 6667" - - if router_type == "leaf": - 
default_route_as_path = "{} {}".format(spine_asn, default_route_as_path) - - if router_type != 'tor': - if family in ["v4", "both"]: - routes.append(("0.0.0.0/0", nexthop, default_route_as_path)) - if family in ["v6", "both"]: - routes.append(("::/0", nexthop_v6, default_route_as_path)) - - # NOTE: Using large enough values (e.g., podset_number = 200, - # us to overflow the 192.168.0.0/16 private address space here. - # This should be fine for internal use, but may pose an issue if used otherwise - for podset in range(0, podset_number): - for tor in range(0, tor_number): - for subnet in range(0, tor_subnet_number): - if router_type == "spine": - # Skip podset 0 for T2 - if podset == 0: - continue - elif router_type == "leaf": - # Skip tor 0 podset 0 for T1 - if podset == 0 and tor == 0: - continue - elif router_type == "tor": - # Skip non podset 0 for T0 - if podset != 0: - continue - elif tor != tor_index: - continue - - suffix = ( (podset * tor_number * max_tor_subnet_number * tor_subnet_size) + \ - (tor * max_tor_subnet_number * tor_subnet_size) + \ - (subnet * tor_subnet_size) ) - octet2 = (168 + (suffix / (256 ** 2))) - octet1 = (192 + (octet2 / 256)) - octet2 = (octet2 % 256) - octet3 = ((suffix / 256) % 256) - octet4 = (suffix % 256) - prefixlen_v4 = (32 - int(math.log(tor_subnet_size, 2))) - - prefix = "{}.{}.{}.{}/{}".format(octet1, octet2, octet3, octet4, prefixlen_v4) - prefix_v6 = "20%02X:%02X%02X:0:%02X::/64" % (octet1, octet2, octet3, octet4) - - leaf_asn = leaf_asn_start + podset - tor_asn = tor_asn_start + tor - - aspath = None - if router_type == "spine": - aspath = "{} {}".format(leaf_asn, tor_asn) - elif router_type == "leaf": - if podset == 0: - aspath = "{}".format(tor_asn) - else: - aspath = "{} {} {}".format(spine_asn, leaf_asn, tor_asn) - - if family in ["v4", "both"]: - routes.append((prefix, nexthop, aspath)) - if family in ["v6", "both"]: - routes.append((prefix_v6, nexthop_v6, aspath)) - - return routes - - -def fib_t0(ptfhost, tbinfo, 
localhost, topology=None): - logger.info("use fib_t0 to setup routes for topo {}".format(tbinfo['topo']['name'])) - - common_config_topo = tbinfo['topo']['properties']['configuration_properties']['common'] - podset_number = common_config_topo.get("podset_number", 200) - tor_number = common_config_topo.get("tor_number", 16) - tor_subnet_number = common_config_topo.get("tor_subnet_number", 2) - max_tor_subnet_number = common_config_topo.get("max_tor_subnet_number", 16) - tor_subnet_size = common_config_topo.get("tor_subnet_size", 128) - nhipv4 = common_config_topo.get("nhipv4", "10.10.246.254") - nhipv6 = common_config_topo.get("nhipv6", "fc0a::ff") - spine_asn = common_config_topo.get("spine_asn", 65534) - leaf_asn_start = common_config_topo.get("leaf_asn_start", 64600) - tor_asn_start = common_config_topo.get("tor_asn_start", 65500) - - ptf_hostname = tbinfo['ptf'] - ptfip = ptfhost.host.options['inventory_manager'].get_host(ptf_hostname).vars['ansible_host'] - - local_ip = ipaddress.IPAddress(nhipv4) - local_ipv6 = ipaddress.IPAddress(nhipv6) - for k, v in tbinfo['topo']['properties']['configuration'].items(): - vm_offset = tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - peer_ip = ipaddress.IPNetwork(v['bp_interface']['ipv4']) - peer_ipv6 = ipaddress.IPNetwork(v['bp_interface']['ipv6']) - asn = int(v['bgp']['asn']) - port = 5000 + vm_offset - port6 = 6000 + vm_offset - - ptfhost.exabgp(name=k, - state="started", \ - router_id = str(local_ip), \ - local_ip = str(local_ip), \ - peer_ip = str(peer_ip.ip), \ - local_asn = asn, \ - peer_asn = asn, \ - port = port) - - ptfhost.exabgp(name="%s-v6" % k, - state="started", \ - router_id = str(local_ip), \ - local_ip = str(local_ipv6), \ - peer_ip = str(peer_ipv6.ip), \ - local_asn = asn, \ - peer_asn = asn, \ - port = port6) - # check if bgp http_api is ready - for k, v in tbinfo['topo']['properties']['configuration'].items(): - vm_offset = tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - 
- port = 5000 + vm_offset - assert wait_tcp_connection(localhost, ptfip, port) - - port6 = 6000 + vm_offset - assert wait_tcp_connection(localhost, ptfip, port6) - - for k, v in tbinfo['topo']['properties']['configuration'].items(): - vm_offset = tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - port = 5000 + vm_offset - port6 = 6000 + vm_offset - - routes_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number, - spine_asn, leaf_asn_start, tor_asn_start, - local_ip, local_ipv6, tor_subnet_size, max_tor_subnet_number) - routes_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number, - spine_asn, leaf_asn_start, tor_asn_start, - local_ip, local_ipv6, tor_subnet_size, max_tor_subnet_number) - - announce_routes(ptfip, port, routes_v4) - announce_routes(ptfip, port6, routes_v6) - - -def fib_t1_lag(ptfhost, tbinfo, localhost): - logger.info("use fib_t1_lag to setup routes for topo {}".format(tbinfo['topo']['name'])) - - common_config_topo = tbinfo['topo']['properties']['configuration_properties']['common'] - podset_number = common_config_topo.get("podset_number", 200) - tor_number = common_config_topo.get("tor_number", 16) - tor_subnet_number = common_config_topo.get("tor_subnet_number", 2) - max_tor_subnet_number = common_config_topo.get("max_tor_subnet_number", 16) - tor_subnet_size = common_config_topo.get("tor_subnet_size", 128) - nhipv4 = common_config_topo.get("nhipv4", "10.10.246.254") - nhipv6 = common_config_topo.get("nhipv6", "fc0a::ff") - leaf_asn_start = common_config_topo.get("leaf_asn_start", 64600) - tor_asn_start = common_config_topo.get("tor_asn_start", 65500) - - ptf_hostname = tbinfo['ptf'] - ptfip = ptfhost.host.options['inventory_manager'].get_host(ptf_hostname).vars['ansible_host'] - - local_ip = ipaddress.IPAddress(nhipv4) - local_ipv6 = ipaddress.IPAddress(nhipv6) - - for k, v in tbinfo['topo']['properties']['configuration'].items(): - vm_offset = 
tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - peer_ip = ipaddress.IPNetwork(v['bp_interface']['ipv4']) - peer_ipv6 = ipaddress.IPNetwork(v['bp_interface']['ipv6']) - asn = int(v['bgp']['asn']) - port = 5000 + vm_offset - port6 = 6000 + vm_offset - - ptfhost.exabgp(name=k, - state="started", \ - router_id = str(local_ip), \ - local_ip = str(local_ip), \ - peer_ip = str(peer_ip.ip), \ - local_asn = asn, \ - peer_asn = asn, \ - port = port) - - ptfhost.exabgp(name="%s-v6" % k, - state="started", \ - router_id = str(local_ip), \ - local_ip = str(local_ipv6), \ - peer_ip = str(peer_ipv6.ip), \ - local_asn = asn, \ - peer_asn = asn, \ - port = port6) - # Check if bgp http_api port is ready - for k, v in tbinfo['topo']['properties']['configuration'].items(): - vm_offset = tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - - port = 5000 + vm_offset - assert wait_tcp_connection(localhost, ptfip, port) - - port6 = 6000 + vm_offset - assert wait_tcp_connection(localhost, ptfip, port6) - - for k, v in tbinfo['topo']['properties']['configuration'].items(): - - vm_offset = tbinfo['topo']['properties']['topology']['VMs'][k]['vm_offset'] - port = 5000 + vm_offset - port6 = 6000 + vm_offset - - router_type = None - if 'spine' in v['properties']: - router_type = 'spine' - elif 'tor' in v['properties']: - router_type = 'tor' - tornum = v.get('tornum', None) - tor_index = tornum - 1 if tornum is not None else None - if router_type: - routes_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number, - None, leaf_asn_start, tor_asn_start, - local_ip, local_ipv6, tor_subnet_size, max_tor_subnet_number, - router_type=router_type, tor_index=tor_index) - routes_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number, - None, leaf_asn_start, tor_asn_start, - local_ip, local_ipv6, tor_subnet_size, max_tor_subnet_number, - router_type=router_type, tor_index=tor_index) - announce_routes(ptfip, port, routes_v4) - 
announce_routes(ptfip, port6, routes_v6) - - if 'vips' in v: - routes_vips = [] - for prefix in v["vips"]["ipv4"]["prefixes"]: - routes_vips.append((prefix, local_ip, v["vips"]["ipv4"]["asn"])) - announce_routes(ptfip, port, routes_vips) - - -@pytest.fixture(scope='module') -def fib(ptfhost, tbinfo, localhost): - topology = tbinfo['topo']['name'] - logger.info("setup fib to topo {}".format(topology)) - if tbinfo['topo']['type'] == "t0": - fib_t0(ptfhost, tbinfo, localhost, topology) - elif tbinfo['topo']['type'] == "t1": - fib_t1_lag(ptfhost, tbinfo, localhost) - else: - logger.error("unknown topology {}".format(tbinfo['topo']['name'])) diff --git a/tests/conftest.py b/tests/conftest.py index 3c86dc6aadf..1b9d56b961a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -33,7 +33,6 @@ pytest_plugins = ('tests.common.plugins.ptfadapter', 'tests.common.plugins.ansible_fixtures', 'tests.common.plugins.dut_monitor', - 'tests.common.plugins.fib', 'tests.common.plugins.tacacs', 'tests.common.plugins.loganalyzer', 'tests.common.plugins.psu_controller', diff --git a/tests/decap/test_decap.py b/tests/decap/test_decap.py index 58b1be7ed73..86d7717b301 100644 --- a/tests/decap/test_decap.py +++ b/tests/decap/test_decap.py @@ -1,5 +1,6 @@ import json import logging +import tempfile from datetime import datetime import pytest @@ -11,7 +12,6 @@ from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import] from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import] from tests.ptf_runner import ptf_runner -from tests.common.plugins.fib import generate_routes logger = logging.getLogger(__name__) @@ -22,99 +22,83 @@ pytest.mark.topology('any') ] -def get_uplink_ports(topology, topo_type): - uplink_ports = [] - if topo_type == "t0": - for k, v in topology["VMs"].items(): - if "T1" in k: - uplink_ports.append("[{}]".format(" ".join([str(vlan) for vlan in v["vlans"]]))) - elif topo_type == "t1": - for k, v in 
topology["VMs"].items(): - if "T2" in k: - uplink_ports.append("[{}]".format(" ".join([str(vlan) for vlan in v["vlans"]]))) - return uplink_ports - - -def get_downlink_ports(topology, topo_type): - downlink_ports = [] - if topo_type == "t0": - if "host_interfaces" in topology: - for intf in topology["host_interfaces"]: - downlink_ports.append("[{}]".format(intf)) - if "disabled_host_interfaces" in topology: - for intf in topology["disabled_host_interfaces"]: - downlink_ports.remove("[{}]".format(intf)) - elif topo_type == "t1": - for k, v in topology["VMs"].items(): - if "T0" in k: - downlink_ports.append("[{}]".format(" ".join([str(vlan) for vlan in v["vlans"]]))) - return downlink_ports - - -def gen_fib_info(ptfhost, tbinfo, cfg_facts): - - topo_type = tbinfo["topo"]["type"] - topology = tbinfo["topo"]["properties"]["topology"] - - # uplink ports - uplink_ports_str = " ".join(get_uplink_ports(topology, topo_type)) - - # downlink ports - downlink_ports_str = " ".join(get_downlink_ports(topology, topo_type)) - - fibs = [] - common_config_topo = tbinfo['topo']['properties']['configuration_properties']['common'] - podset_number = 5 # Limit the number of podsets to limit test execution time - tor_number = common_config_topo.get("tor_number", 16) - tor_subnet_number = common_config_topo.get("tor_subnet_number", 2) - max_tor_subnet_number = common_config_topo.get("max_tor_subnet_number", 16) - tor_subnet_size = common_config_topo.get("tor_subnet_size", 128) - - # routes to uplink - routes_uplink_v4 = [] - routes_uplink_v6 = [] - if topo_type == "t0": - routes_uplink_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", "", tor_subnet_size, max_tor_subnet_number) - routes_uplink_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", "", tor_subnet_size, max_tor_subnet_number) - elif topo_type == "t1": - routes_uplink_v4 = generate_routes("v4", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", 
"", tor_subnet_size, max_tor_subnet_number, - router_type="spine") - routes_uplink_v6 = generate_routes("v6", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", "", tor_subnet_size, max_tor_subnet_number, - router_type="spine") - - for prefix, _, _ in routes_uplink_v4: - fibs.append("{} {}".format(prefix, uplink_ports_str)) - for prefix, _, _ in routes_uplink_v6: - fibs.append("{} {}".format(prefix, uplink_ports_str)) - - routes_downlink_v4 = [] - routes_downlink_v6 = [] - if topo_type == "t1": - for tor_index in range(tor_number): - routes_downlink_v4.extend(generate_routes("v4", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", "", tor_subnet_size, max_tor_subnet_number, - router_type="tor", tor_index=tor_index)) - - routes_downlink_v6.extend(generate_routes("v6", podset_number, tor_number, tor_subnet_number, - 0, 0, 0, "", "", tor_subnet_size, max_tor_subnet_number, - router_type="tor", tor_index=tor_index)) - - for prefix, _, _ in routes_downlink_v4: - fibs.append("{} {}".format(prefix, downlink_ports_str)) - for prefix, _, _ in routes_downlink_v6: - fibs.append("{} {}".format(prefix, downlink_ports_str)) - - ptfhost.copy(content="\n".join(fibs), dest="/root/fib_info.txt") - - -def prepare_ptf(ptfhost, tbinfo, cfg_facts): - - gen_fib_info(ptfhost, tbinfo, cfg_facts) +def get_fib_info(duthost, cfg_facts, mg_facts): + """Get parsed FIB information from redis DB. + + Args: + duthost (SonicHost): Object for interacting with DUT. + cfg_facts (dict): Configuration facts. + mg_facts (dict): Minigraph facts. + + Returns: + dict: Map of prefix to PTF ports that are connected to DUT output ports. + { + '192.168.0.0/21': [], + '192.168.8.0/25': [[58 59] [62 63] [66 67] [70 71]], + '192.168.16.0/25': [[58 59] [62 63] [66 67] [70 71]], + ... + '20c0:c2e8:0:80::/64': [[58 59] [62 63] [66 67] [70 71]], + '20c1:998::/64': [[58 59] [62 63] [66 67] [70 71]], + ... 
+ } + """ + timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S') + duthost.shell("redis-dump -d 0 -k 'ROUTE*' -y > /tmp/fib.{}.txt".format(timestamp)) + duthost.fetch(src="/tmp/fib.{}.txt".format(timestamp), dest="/tmp/fib") + + po = cfg_facts.get('PORTCHANNEL', {}) + ports = cfg_facts.get('PORT', {}) + + fib_info = {} + with open("/tmp/fib/{}/tmp/fib.{}.txt".format(duthost.hostname, timestamp)) as fp: + fib = json.load(fp) + for k, v in fib.items(): + skip = False + + prefix = k.split(':', 1)[1] + ifnames = v['value']['ifname'].split(',') + nh = v['value']['nexthop'] + + oports = [] + for ifname in ifnames: + if po.has_key(ifname): + oports.append([str(mg_facts['minigraph_ptf_indices'][x]) for x in po[ifname]['members']]) + else: + if ports.has_key(ifname): + oports.append([str(mg_facts['minigraph_ptf_indices'][ifname])]) + else: + logger.info("Route point to non front panel port {}:{}".format(k, v)) + skip = True + + # skip direct attached subnet + if nh == '0.0.0.0' or nh == '::' or nh == "": + skip = True + + if not skip: + fib_info[prefix] = oports + else: + fib_info[prefix] = [] + return fib_info + + +def gen_fib_info_file(ptfhost, fib_info, filename): + tmp_fib_info = tempfile.NamedTemporaryFile() + for prefix, oports in fib_info.items(): + tmp_fib_info.write(prefix) + if oports: + for op in oports: + tmp_fib_info.write(' [{}]'.format(' '.join(op))) + else: + tmp_fib_info.write(' []') + tmp_fib_info.write('\n') + tmp_fib_info.flush() + ptfhost.copy(src=tmp_fib_info.name, dest=filename) + + +def prepare_ptf(duthost, ptfhost, cfg_facts, mg_facts): + fib_info = get_fib_info(duthost, cfg_facts, mg_facts) + gen_fib_info_file(ptfhost, fib_info, FIB_INFO_DEST) @pytest.fixture(scope="module") @@ -137,6 +121,7 @@ def setup_teardown(request, tbinfo, duthosts, rand_one_dut_hostname, ptfhost): # Gather some facts cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")["ansible_facts"] + mg_facts = duthost.get_extended_minigraph_facts(tbinfo) 
lo_ip = None lo_ipv6 = None @@ -191,7 +176,7 @@ def setup_teardown(request, tbinfo, duthosts, rand_one_dut_hostname, ptfhost): duthost.shell('docker exec swss sh -c "swssconfig /decap_conf.json"') # Prepare PTFf docker - prepare_ptf(ptfhost, tbinfo, cfg_facts) + prepare_ptf(duthost, ptfhost, cfg_facts, mg_facts) setup_info = { "src_ports": ",".join([str(port) for port in src_ports]), diff --git a/tests/run_tests.sh b/tests/run_tests.sh index d9302e56458..692fb2613ed 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -5,7 +5,7 @@ function show_help_and_exit() echo "Usage ${SCRIPT} [options]" echo " options with (*) must be provided" echo " -h -? : get this help" - echo " -a : specify if autu-recover is allowed (default: True)" + echo " -a : specify if auto-recover is allowed (default: True)" echo " -b : specify name of k8s master group used in k8s inventory, format: k8s_vms{msetnumber}_{servernumber}" echo " -c : specify test cases to execute (default: none, executed all matched)" echo " -d : specify DUT name (default: DUT name associated with testbed in testbed file)" diff --git a/tests/test_announce_routes.py b/tests/test_announce_routes.py deleted file mode 100644 index b8738001275..00000000000 --- a/tests/test_announce_routes.py +++ /dev/null @@ -1,12 +0,0 @@ -import pytest - -pytestmark = [ - pytest.mark.pretest, - pytest.mark.topology('util') #special marker -] - -def test_announce_routes(fib): - """Simple test case that utilize fib to announce route in order to a newly setup test bed receive - BGP routes from remote devices - """ - assert True