diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py index e3c9fa364d7..8556899b3ec 100644 --- a/ansible/library/test_facts.py +++ b/ansible/library/test_facts.py @@ -4,9 +4,8 @@ import ipaddr as ipaddress import csv import string -from operator import itemgetter -from itertools import groupby import yaml + from collections import defaultdict DOCUMENTATION = ''' @@ -97,52 +96,76 @@ class ParseTestbedTopoinfo(): - ''' - Parse the CSV file used to describe whole testbed info - Please refer to the example of the CSV file format - CSV file first line is title - The topology name in title is using conf-name - ''' + """Parse the testbed file used to describe whole testbed info""" + + TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') + def __init__(self, testbed_file): self.testbed_filename = testbed_file self.testbed_topo = defaultdict() def read_testbed_topo(self): - CSV_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') - with open(self.testbed_filename) as f: - topo = csv.DictReader(f, fieldnames=CSV_FIELDS, delimiter=',') - - # Validate all field are in the same order and are present - header = next(topo) - for field in CSV_FIELDS: - assert header[field].replace('#', '').strip() == field - - for line in topo: - if line['conf-name'].lstrip().startswith('#'): - ### skip comment line - continue - if line['ptf_ip']: - ptfaddress = ipaddress.IPNetwork(line['ptf_ip']) - line['ptf_ip'] = str(ptfaddress.ip) - line['ptf_netmask'] = str(ptfaddress.netmask) - if line['ptf_ipv6']: - ptfaddress = ipaddress.IPNetwork(line['ptf_ipv6']) - line['ptf_ipv6'] = str(ptfaddress.ip) - line['ptf_netmask_v6'] = str(ptfaddress.netmask) - - line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') - line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']} - del 
line['dut'] - - self.testbed_topo[line['conf-name']] = line - return + + def _cidr_to_ip_mask(network): + addr = ipaddress.IPNetwork(network) + return str(addr.ip), str(addr.netmask) + + def _read_testbed_topo_from_csv(): + """Read csv testbed info file.""" + with open(self.testbed_filename) as f: + topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS, + delimiter=',') + + # Validate all field are in the same order and are present + header = next(topo) + for field in self.TESTBED_FIELDS: + assert header[field].replace('#', '').strip() == field + + for line in topo: + if line['conf-name'].lstrip().startswith('#'): + # skip comment line + continue + if line['ptf_ip']: + line['ptf_ip'], line['ptf_netmask'] = \ + _cidr_to_ip_mask(line["ptf_ip"]) + if line['ptf_ipv6']: + line['ptf_ipv6'], line['ptf_netmask_v6'] = \ + _cidr_to_ip_mask(line["ptf_ipv6"]) + + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']} + del line['dut'] + + self.testbed_topo[line['conf-name']] = line + + def _read_testbed_topo_from_yaml(): + """Read yaml testbed info file.""" + with open(self.testbed_filename) as f: + tb_info = yaml.safe_load(f) + for tb in tb_info: + if tb["ptf_ip"]: + tb["ptf_ip"], tb["ptf_netmask"] = \ + _cidr_to_ip_mask(tb["ptf_ip"]) + if tb["ptf_ipv6"]: + tb["ptf_ipv6"], tb["ptf_netmask_v6"] = \ + _cidr_to_ip_mask(tb["ptf_ipv6"]) + tb["duts"] = tb.pop("dut") + tb["duts_map"] = \ + {dut: i for i, dut in enumerate(tb["duts"])} + self.testbed_topo[tb["conf-name"]] = tb + + if self.testbed_filename.endswith(".csv"): + _read_testbed_topo_from_csv() + elif self.testbed_filename.endswith(".yaml"): + _read_testbed_topo_from_yaml() def get_testbed_info(self, testbed_name): if testbed_name: - return [self.testbed_topo[testbed_name]] + return self.testbed_topo[testbed_name] else: return self.testbed_topo + class TestcasesTopology(): ''' Read testcases definition yaml file under 
ansible/roles/test/vars/testcases.yml @@ -167,6 +190,7 @@ def read_testcases(self): def get_topo_testcase(self): return self.topo_testcase + def main(): module = AnsibleModule( argument_spec=dict( @@ -183,7 +207,7 @@ def main(): try: topoinfo = ParseTestbedTopoinfo(testbed_file) topoinfo.read_testbed_topo() - testbed_topo = topoinfo.get_testbed_info(testbed_name)[0] + testbed_topo = topoinfo.get_testbed_info(testbed_name) testcaseinfo = TestcasesTopology(testcase_file) testcaseinfo.read_testcases() testcase_topo = testcaseinfo.get_topo_testcase() @@ -193,6 +217,7 @@ def main(): except Exception as e: module.fail_json(msg=traceback.format_exc()) + from ansible.module_utils.basic import * if __name__== "__main__": main() diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index d231252f285..13184868c88 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -63,10 +63,8 @@ function usage exit } -function read_file +function read_csv { - echo reading - # Filter testbed names in the first column in the testbed definition file line=$(cat $tbfile | grep "^$1,") @@ -100,6 +98,59 @@ function read_file duts=${dut//[\[\] ]/} } +function read_yaml +{ + content=$(python -c "from __future__ import print_function; import yaml; print('+'.join(str(tb) for tb in yaml.safe_load(open('$tbfile')) if '$1' in str(tb)))") + + IFS=$'+' read -r -a tb_lines <<< $content + linecount=${#tb_lines[@]} + + if [ $linecount == 0 ] + then + echo "Couldn't find topology name '$1'" + exit + elif [ $linecount -gt 1 ] + then + echo "Find more than one topology name in $tbfile" + exit + else + echo found topology $1 + fi + + tb_line=${tb_lines[0]} + line_arr=($1) + for attr in group-name topo ptf_image_name ptf ptf_ip ptf_ipv6 server vm_base dut comment; + do + value=$(python -c "from __future__ import print_function; tb=eval(\"$tb_line\"); print(tb['$attr'])") + [ "$value" == "None" ] && value= + line_arr=("${line_arr[@]}" "$value") + done + + vm_set_name=${line_arr[1]} + 
topo=${line_arr[2]} + ptf_imagename=${line_arr[3]} + ptf=${line_arr[4]} + ptf_ip=${line_arr[5]} + ptf_ipv6=${line_arr[6]} + server=${line_arr[7]} + vm_base=${line_arr[8]} + dut=${line_arr[9]} + duts=$(python -c "from __future__ import print_function; print(','.join(eval(\"$dut\")))") +} + +function read_file +{ + echo reading + + if [[ $tbfile == *.csv ]] + then + read_csv ${topology} + elif [[ $tbfile == *.yaml ]] + then + read_yaml ${topology} + fi +} + function start_vms { server=$1 diff --git a/ansible/testbed.yaml b/ansible/testbed.yaml new file mode 100644 index 00000000000..4670834ec5e --- /dev/null +++ b/ansible/testbed.yaml @@ -0,0 +1,158 @@ +--- + +- conf-name: ptf1-m + group-name: ptf1 + topo: ptf32 + ptf_image_name: docker-ptf-sai-mlnx + ptf: ptf-unknown + ptf_ip: 10.255.0.188/24 + ptf_ipv6: + server: server_1 + vm_base: + dut: + - str-msn2700-01 + comment: Test ptf Mellanox + +- conf-name: ptf2-b + group-name: ptf2 + topo: ptf64 + ptf_image_name: docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.189/24 + ptf_ipv6: + server: server_1 + vm_base: + dut: + - lab-s6100-01 + comment: Test ptf Broadcom + +- conf-name: vms-sn2700-t1 + group-name: vms1-1 + topo: t1 + ptf_image_name: docker-ptf-sai-mlnx + ptf: ptf-unknown + ptf_ip: 10.255.0.178/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - str-msn2700-01 + comment: Tests Mellanox SN2700 vms + +- conf-name: vms-sn2700-t1-lag + group-name: vms1-1 + topo: t1-lag + ptf_image_name: docker-ptf-sai-mlnx + ptf: ptf-unknown + ptf_ip: 10.255.0.178/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - str-msn2700-01 + comment: Tests Mellanox SN2700 vms + +- conf-name: vms-sn2700-t0 + group-name: vms1-1 + topo: t0 + ptf_image_name: docker-ptf-sai-mlnx + ptf: ptf-unknown + ptf_ip: 10.255.0.178/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - str-msn2700-01 + comment: Tests Mellanox SN2700 vms + +- conf-name: vms-s6000-t0 + group-name: vms2-1 + topo: t0 + ptf_image_name: 
docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.179/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - lab-s6000-01 + comment: Tests Dell S6000 vms + +- conf-name: vms-a7260-t0 + group-name: vms3-1 + topo: t0-116 + ptf_image_name: docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.180/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - lab-a7260-01 + comment: Tests Arista A7260 vms + +- conf-name: vms-s6100-t0 + group-name: vms4-1 + topo: t0-64 + ptf_image_name: docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.181/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - lab-s6100-01 + comment: Tests Dell S6100 vms + +- conf-name: vms-s6100-t1 + group-name: vms4-1 + topo: t1-64 + ptf_image_name: docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.182/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - lab-s6100-01 + comment: Tests Dell S6100 vms + +- conf-name: vms-s6100-t1-lag + group-name: vms5-1 + topo: t1-64-lag + ptf_image_name: docker-ptf-sai-brcm + ptf: ptf-unknown + ptf_ip: 10.255.0.183/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - lab-s6100-01 + comment: Tests Dell S6100 vms + +- conf-name: vms-multi-dut + group-name: vms1-duts + topo: ptf64 + ptf_image_name: docker-ptf + ptf: ptf-unknown + ptf_ip: 10.255.0.184/24 + ptf_ipv6: + server: server_1 + vm_base: VM0100 + dut: + - dut-host1 + - dut-host2 + comment: Example Multi DUTs testbed + +- conf-name: vms-example-ixia-1 + group-name: vms6-1 + topo: t0-64 + ptf_image_name: docker-ptf-ixia + ptf: example-ixia-ptf-1 + ptf_ip: 10.0.0.30/32 + ptf_ipv6: + server: server_6 + vm_base: VM0600 + dut: + - example-s6100-dut-1 + comment: superman diff --git a/ansible/vtestbed.yaml b/ansible/vtestbed.yaml new file mode 100644 index 00000000000..fee3a4e3e59 --- /dev/null +++ b/ansible/vtestbed.yaml @@ -0,0 +1,67 @@ +--- + +- conf-name: vms-kvm-t0 + group-name: vms6-1 + topo: t0 + ptf_image_name: docker-ptf + ptf: ptf-01 + ptf_ip: 
10.250.0.102/24 + ptf_ipv6: fec0::ffff:afa:2/64 + server: server_1 + vm_base: VM0100 + dut: + - vlab-01 + comment: Tests virtual switch vm + +- conf-name: vms-kvm-t0-64 + group-name: vms6-1 + topo: t0-64 + ptf_image_name: docker-ptf + ptf: ptf-01 + ptf_ip: 10.250.0.102/24 + ptf_ipv6: fec0::ffff:afa:2/64 + server: server_1 + vm_base: VM0100 + dut: + - vlab-02 + comment: Tests virtual switch vm + +- conf-name: vms-kvm-t1-lag + group-name: vms6-2 + topo: t1-lag + ptf_image_name: docker-ptf + ptf: ptf-02 + ptf_ip: 10.250.0.106/24 + ptf_ipv6: fec0::ffff:afa:6/64 + server: server_1 + vm_base: VM0104 + dut: + - vlab-03 + comment: Tests virtual switch vm + +- conf-name: vms-kvm-t0-2 + group-name: vms6-3 + topo: t0 + ptf_image_name: docker-ptf + ptf: ptf-03 + ptf_ip: 10.250.0.108/24 + ptf_ipv6: fec0::ffff:afa:8/64 + server: server_1 + vm_base: VM0104 + dut: + - vlab-04 + comment: Tests virtual switch vm + +- conf-name: vms-kvm-dual-t0 + group-name: vms6-4 + topo: dualtor + ptf_image_name: docker-ptf + ptf: ptf-04 + ptf_ip: 10.250.0.109/24 + ptf_ipv6: fec0::ffff:afa:9/64 + server: server_1 + vm_base: VM0108 + dut: + - vlab-05 + - vlab-06 + comment: Dual-TOR testbed diff --git a/tests/common/testbed.py b/tests/common/testbed.py new file mode 100644 index 00000000000..9eea08c7fd9 --- /dev/null +++ b/tests/common/testbed.py @@ -0,0 +1,278 @@ +""" +Testbed file related utilities. 
+""" +from __future__ import print_function +import argparse +import csv +import ipaddr as ipaddress +import json +import logging +import os +import re +import string +import yaml + +from collections import defaultdict +from collections import OrderedDict + + +class TestbedInfo(object): + """Parse the testbed file used to describe whole testbed info.""" + + TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') + + def __init__(self, testbed_file): + if testbed_file.endswith(".csv"): + self.testbed_yamlfile = testbed_file.replace(".csv", ".yaml") + logging.warn( + "Deprecated CSV format testbed file, please use yaml file" + ) + if os.path.exists(self.testbed_yamlfile): + logging.debug( + "Use yaml testbed file: %s", self.testbed_yamlfile + ) + self.testbed_filename = self.testbed_yamlfile + else: + self.testbed_filename = testbed_file + elif testbed_file.endswith(".yaml"): + self.testbed_filename = testbed_file + else: + raise ValueError("Unsupported testbed file type") + + # use OrderedDict here to ensure yaml file has same order as csv. 
+ self.testbed_topo = OrderedDict() + # use to convert from netmask to cidr + self._address_cache = {} + if self.testbed_filename.endswith(".yaml"): + self._read_testbed_topo_from_yaml() + if self.testbed_filename.endswith(".csv"): + self._read_testbed_topo_from_csv() + # create yaml testbed file + self.dump_testbeds_to_yaml() + self.parse_topo() + + def _cidr_to_ip_mask(self, network): + addr = ipaddress.IPNetwork(network) + ip_address, netmask = str(addr.ip), str(addr.netmask) + self._address_cache[(ip_address, netmask)] = network + return ip_address, netmask + + def _ip_mask_to_cidr(self, ip_address, netmask): + return self._address_cache[(ip_address, netmask)] + + def _read_testbed_topo_from_csv(self): + """Read csv testbed info file.""" + with open(self.testbed_filename) as f: + topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS, + delimiter=',') + + # Validate all field are in the same order and are present + header = next(topo) + for field in self.TESTBED_FIELDS: + assert header[field].replace('#', '').strip() == field + + for line in topo: + if line['conf-name'].lstrip().startswith('#'): + # skip comment line + continue + if line['ptf_ip']: + line['ptf_ip'], line['ptf_netmask'] = \ + self._cidr_to_ip_mask(line['ptf_ip']) + if line['ptf_ipv6']: + line['ptf_ipv6'], line['ptf_netmask_v6'] = \ + self._cidr_to_ip_mask(line['ptf_ipv6']) + + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']} + del line['dut'] + + self.testbed_topo[line['conf-name']] = line + + def _read_testbed_topo_from_yaml(self): + """Read yaml testbed info file.""" + with open(self.testbed_filename) as f: + tb_info = yaml.safe_load(f) + for tb in tb_info: + if tb["ptf_ip"]: + tb["ptf_ip"], tb["ptf_netmask"] = \ + self._cidr_to_ip_mask(tb["ptf_ip"]) + if tb["ptf_ipv6"]: + tb["ptf_ipv6"], tb["ptf_netmask_v6"] = \ + self._cidr_to_ip_mask(tb["ptf_ipv6"]) + tb["duts"] = tb.pop("dut") + 
tb["duts_map"] = {dut: i for i, dut in enumerate(tb["duts"])} + self.testbed_topo[tb["conf-name"]] = tb + + def dump_testbeds_to_yaml(self): + + def none_representer(dumper, _): + return dumper.represent_scalar("tag:yaml.org,2002:null", "") + + def ordereddict_representer(dumper, data): + value = [] + node = yaml.MappingNode("tag:yaml.org,2002:map", value) + for item_key, item_value in data.items(): + node_key = dumper.represent_data(item_key) + node_value = dumper.represent_data(item_value) + value.append((node_key, node_value)) + return node + + class IncIndentDumper(yaml.Dumper): + """ + Dumper class to increase indentation for nested list. + + Add extra indentation since py-yaml doesn't add extra + indentation for list inside mapping by default [1]. + + This also add extra blank lines between each testbed entry [2]. + + [1]: https://web.archive.org/web/20170903201521/https://pyyaml.org/ticket/64 + [2]: https://github.com/yaml/pyyaml/issues/127 + """ + def increase_indent(self, flow=False, indentless=False): + return yaml.Dumper.increase_indent(self, flow, False) + + def write_line_break(self, data=None): + yaml.Dumper.write_line_break(self, data) + if len(self.indents) == 1: + yaml.Dumper.write_line_break(self) + + testbed_data = [] + for tb_name, tb_dict in self.testbed_topo.items(): + ptf_ip, ptf_ipv6 = None, None + if tb_dict["ptf_ip"]: + ptf_ip = self._ip_mask_to_cidr(tb_dict["ptf_ip"], + tb_dict["ptf_netmask"]) + if tb_dict["ptf_ipv6"]: + ptf_ipv6 = self._ip_mask_to_cidr(tb_dict["ptf_ipv6"], + tb_dict["ptf_netmask_v6"]) + testbed_mapping = zip( + self.TESTBED_FIELDS, + [ + tb_name, + tb_dict["group-name"], + tb_dict["topo"], + tb_dict["ptf_image_name"], + tb_dict["ptf"], + ptf_ip, + ptf_ipv6, + tb_dict["server"], + tb_dict["vm_base"] or None, + tb_dict["duts"], + tb_dict["comment"] + ] + ) + testbed = OrderedDict(testbed_mapping) + testbed_data.append(testbed) + + # dump blank instead of 'null' for None + 
IncIndentDumper.add_representer(type(None), none_representer) + # dump testbed fields in the order same as csv + IncIndentDumper.add_representer(OrderedDict, ordereddict_representer) + + with open(self.testbed_yamlfile, "w") as yamlfile: + yaml.dump(testbed_data, yamlfile, + explicit_start=True, Dumper=IncIndentDumper) + + def get_testbed_type(self, topo_name): + pattern = re.compile(r'^(t0|t1|ptf|fullmesh|dualtor)') + match = pattern.match(topo_name) + if match == None: + raise Exception("Unsupported testbed type - {}".format(topo_name)) + tb_type = match.group() + if tb_type == 'dualtor': + # augment dualtor topology type to 't0' to avoid adding it + # everywhere. + tb_type = 't0' + return tb_type + + def _parse_dut_port_index(self, port): + """ + parse port string + + port format : dut_index.port_index@ptf_index + + """ + m = re.match("(\d+)\.(\d+)@(\d+)", port) + (dut_index, port_index, ptf_index) = (int(m.group(1)), int(m.group(2)), int(m.group(3))) + + return (dut_index, port_index, ptf_index) + + def calculate_ptf_index_map(self, line): + map = defaultdict() + + # For multi-DUT testbed, because multiple DUTs are sharing a same + # PTF docker, the ptf docker interface index will not be exactly + # match the interface index on DUT. The information is available + # in the topology facts. Get these information out and put them + # in the 2 levels dictionary as: + # { dut_index : { dut_port_index : ptf_index * } * } + + topo_facts = line['topo']['properties'] + if 'topology' not in topo_facts: + return map + + topology = topo_facts['topology'] + if 'host_interfaces' in topology: + for _ports in topology['host_interfaces']: + # Example: ['0.0,1.0', '0.1,1.1', '0.2,1.2', ... ] + # if there is no '@' then they are shared, no need to update. + ports = str(_ports) + for port in ports.split(','): + if '@' in port and '.' 
in port:
+                        dut_index, port_index, ptf_index = self._parse_dut_port_index(port)
+                        if port_index != ptf_index:
+                            # Need to add this in map
+                            dut_dict = map[dut_index] if dut_index in map else {}
+                            dut_dict[port_index] = ptf_index
+                            map[dut_index] = dut_dict
+
+        if 'VMs' in topology:
+            for _, vm in topology['VMs'].items():
+                if 'vlans' in vm:
+                    for _port in vm['vlans']:
+                        # Example: ['0.31@34', '1.31@35']
+                        port = str(_port)
+                        if '@' in port and '.' in port:
+                            dut_index, port_index, ptf_index = self._parse_dut_port_index(port)
+                            if port_index != ptf_index:
+                                # Need to add this in map
+                                dut_dict = map[dut_index] if dut_index in map else {}
+                                dut_dict[port_index] = ptf_index
+                                map[dut_index] = dut_dict
+
+        return map
+
+    def parse_topo(self):
+        for tb_name, tb in self.testbed_topo.items():
+            topo = tb.pop("topo")
+            tb["topo"] = defaultdict()
+            tb["topo"]["name"] = topo
+            tb["topo"]["type"] = self.get_testbed_type(topo)
+            topo_dir = os.path.join(os.path.dirname(__file__), "../../ansible/vars/")
+            topo_file = os.path.join(topo_dir, "topo_{}.yml".format(topo))
+            with open(topo_file, 'r') as fh:
+                tb['topo']['properties'] = yaml.safe_load(fh)
+            tb['topo']['ptf_map'] = self.calculate_ptf_index_map(tb)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(
+        description="""
+        Render testbed file, input could be either CSV or yaml file.
+        If input is a CSV file, will dump its content as a yaml file with the
+        same name in same directory. 
+ """ + ) + + file_group = parser.add_mutually_exclusive_group(required=True) + file_group.add_argument("-y", "--yaml", dest="testbed_yamlfile", help="testbed yaml file") + file_group.add_argument("-c", "--csv", dest="testbed_csvfile", help="testbed csv file") + + parser.add_argument("--print-data", help="print testbed", action="store_true") + + args = parser.parse_args() + testbedfile = args.testbed_csvfile or args.testbed_yamlfile + tbinfo = TestbedInfo(testbedfile) + if args.print_data: + print(json.dumps(tbinfo.testbed_topo, indent=4)) diff --git a/tests/conftest.py b/tests/conftest.py index 3fc79f21085..ea240734709 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,20 +3,15 @@ import json import tarfile import logging -import string -import re import getpass import random import pytest -import csv import yaml import jinja2 -import ipaddr as ipaddress from ansible.parsing.dataloader import DataLoader from ansible.inventory.manager import InventoryManager -from collections import defaultdict from datetime import datetime from tests.common.fixtures.conn_graph_facts import conn_graph_facts from tests.common.devices import Localhost @@ -24,6 +19,8 @@ from tests.common.helpers.constants import ASIC_PARAM_TYPE_ALL, ASIC_PARAM_TYPE_FRONTEND, DEFAULT_ASIC_ID from tests.common.helpers.dut_ports import encode_dut_port_name from tests.common.devices import DutHosts +from tests.common.testbed import TestbedInfo + logger = logging.getLogger(__name__) @@ -42,124 +39,6 @@ 'tests.vxlan') -class TestbedInfo(object): - """ - Parse the CSV file used to describe whole testbed info - Please refer to the example of the CSV file format - CSV file first line is title - The topology name in title is using conf-name - """ - - def __init__(self, testbed_file): - self.testbed_filename = testbed_file - self.testbed_topo = defaultdict() - CSV_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') - - with 
open(self.testbed_filename) as f: - topo = csv.DictReader(f, fieldnames=CSV_FIELDS, delimiter=',') - - # Validate all field are in the same order and are present - header = next(topo) - for field in CSV_FIELDS: - assert header[field].replace('#', '').strip() == field - - for line in topo: - if line['conf-name'].lstrip().startswith('#'): - ### skip comment line - continue - if line['ptf_ip']: - ptfaddress = ipaddress.IPNetwork(line['ptf_ip']) - line['ptf_ip'] = str(ptfaddress.ip) - line['ptf_netmask'] = str(ptfaddress.netmask) - - if line['ptf_ipv6']: - ptfaddress = ipaddress.IPNetwork(line['ptf_ipv6']) - line['ptf_ipv6'] = str(ptfaddress.ip) - line['ptf_netmask_v6'] = str(ptfaddress.netmask) - - line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') - del line['dut'] - - topo = line['topo'] - del line['topo'] - line['topo'] = defaultdict() - line['topo']['name'] = topo - line['topo']['type'] = self.get_testbed_type(line['topo']['name']) - with open("../ansible/vars/topo_{}.yml".format(topo), 'r') as fh: - line['topo']['properties'] = yaml.safe_load(fh) - line['topo']['ptf_map'] = self.calculate_ptf_index_map(line) - - self.testbed_topo[line['conf-name']] = line - - def get_testbed_type(self, topo_name): - pattern = re.compile(r'^(t0|t1|ptf|fullmesh|dualtor)') - match = pattern.match(topo_name) - if match == None: - raise Exception("Unsupported testbed type - {}".format(topo_name)) - tb_type = match.group() - if tb_type == 'dualtor': - # augment dualtor topology type to 't0' to avoid adding it - # everywhere. 
- tb_type = 't0' - return tb_type - - def _parse_dut_port_index(self, port): - """ - parse port string - - port format : dut_index.port_index@ptf_index - - """ - m = re.match("(\d+)\.(\d+)@(\d+)", port) - (dut_index, port_index, ptf_index) = (int(m.group(1)), int(m.group(2)), int(m.group(3))) - - return (dut_index, port_index, ptf_index) - - def calculate_ptf_index_map(self, line): - map = defaultdict() - - # For multi-DUT testbed, because multiple DUTs are sharing a same - # PTF docker, the ptf docker interface index will not be exactly - # match the interface index on DUT. The information is available - # in the topology facts. Get these information out and put them - # in the 2 levels dictionary as: - # { dut_index : { dut_port_index : ptf_index * } * } - - topo_facts = line['topo']['properties'] - if 'topology' not in topo_facts: - return map - - topology = topo_facts['topology'] - if 'host_interfaces' in topology: - for _ports in topology['host_interfaces']: - # Example: ['0.0,1.0', '0.1,1.1', '0.2,1.2', ... ] - # if there is no '@' then they are shared, no need to update. - ports = str(_ports) - for port in ports.split(','): - if '@' in port and '.' in port: - dut_index, port_index, ptf_index = _parse_dut_port_index(port) - if port_index != ptf_index: - # Need to add this in map - dut_dict = map[dut_index] if dut_index in map else {} - dut_dict[port_index] = ptf_index - map[dut_index] = dut_dict - - if 'VMs' in topology: - for _, vm in topology['VMs'].items(): - if 'vlans' in vm: - for _port in vm['vlans']: - # Example: ['0.31@34', '1.31@35'] - port = str(_port) - if '@' in port and '.' 
in port: - dut_index, port_index, ptf_index = self._parse_dut_port_index(port) - if port_index != ptf_index: - # Need to add this in map - dut_dict = map[dut_index] if dut_index in map else {} - dut_dict[port_index] = ptf_index - map[dut_index] = dut_dict - - return map - def pytest_addoption(parser): parser.addoption("--testbed", action="store", default=None, help="testbed name") parser.addoption("--testbed_file", action="store", default=None, help="testbed file name") @@ -267,7 +146,7 @@ def tbinfo(request): raise ValueError("testbed and testbed_file are required!") testbedinfo = TestbedInfo(tbfile) - return testbedinfo.testbed_topo[tbname] + return testbedinfo.testbed_topo.get(tbname, {}) @pytest.fixture(name="duthosts", scope="session")