diff --git a/README.md b/README.md index f08df0a82e6..27c094cd193 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Software for Open Networking in the Cloud - SONiC # Management - +# cls test # Description Tools for managing, configuring and monitoring SONiC diff --git a/ansible/TestbedProcessing.py b/ansible/TestbedProcessing.py index 707f6d45084..52fbcbc55d1 100644 --- a/ansible/TestbedProcessing.py +++ b/ansible/TestbedProcessing.py @@ -9,14 +9,14 @@ Requirement: python version: 2.X - python package: PyYAML 3.12 (or later) - + python package: PyYAML 3.12 (or later) + PyYaml Install Instructions: - [1] Download PyYAML from https://pyyaml.org/wiki/PyYAML + [1] Download PyYAML from https://pyyaml.org/wiki/PyYAML [2] Unpack the archive [3] Install the package by executing (python setup.py install) - [4] Test if installation was successful (python setup.py test) - + [4] Test if installation was successful (python setup.py test) + Usage: put TestbedProcessing.py and testbed.yaml under sonic-mgmt/ansible python TestbedProcessing.py @@ -25,12 +25,12 @@ Arguments: -i : the testbed.yaml file to parse -basedir : the basedir for the project - -backupdir : the backup directory for the files + -backupdir : the backup directory for the files Script Procedure - [1] Backup the files we will be copying + [1] Backup the files we will be copying [2] Load testbed.yaml into dictionaries for easy processing - [3] Generate the files via methods defined below + [3] Generate the files via methods defined below """ # ARGUMENTS TO PARSE @@ -86,7 +86,7 @@ """ represent_none(self, _) -modifies yaml to replace null values with blanks +modifies yaml to replace null values with blanks SOURCE: https://stackoverflow.com/questions/37200150/can-i-dump-blank-instead-of-null-in-yaml-pyyaml/37201633#3720163 """ def represent_none(self, _): @@ -98,7 +98,7 @@ def represent_none(self, _): generateDictionary(data, result, category) @:parameter data - the dictionary to iterate through @:parameter 
result - the resulting dictionary -Generates the dictionaries that are used when creating csv, yml, or text files +Generates the dictionaries that are used when creating csv, yml, or text files """ def generateDictionary(data, result, category): for key, value in data[category].items(): @@ -108,7 +108,7 @@ def generateDictionary(data, result, category): """ makeMain(data, outfile) @:parameter data - the dictionary to look through -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to makeMain generates the vm_host/main.yml file it pulls two sets of information; dictionary data and proxy data """ @@ -122,7 +122,9 @@ def makeMain(data, outfile): "skip_image_downloading": veos.get("skip_image_downloading"), "vm_console_base": veos.get("vm_console_base"), "memory": veos.get("memory"), - "max_fp_num": veos.get("max_fp_num") + "max_fp_num": veos.get("max_fp_num"), + "ptf_bp_ip": veos.get("ptf_bp_ip"), + "ptf_bp_ipv6": veos.get("ptf_bp_ipv6") } proxy = { "proxy_env": { @@ -141,21 +143,21 @@ def makeMain(data, outfile): @:parameter data - the dictionary to look for (in this case: veos) @:parameter outfile - the file to write to generates /group_vars/vm_host/creds.yml -pulls ansible_user, ansible_password, ansible_sudo_pass from vm_host_ansible into a dictionary +pulls ansible_user, ansible_password, ansible_become_pass from vm_host_ansible into a dictionary """ def makeVMHostCreds(data, outfile): veos = data result = { "ansible_user": veos.get("vm_host_ansible").get("ansible_user"), "ansible_password": veos.get("vm_host_ansible").get("ansible_password"), - "ansible_sudo_password": veos.get("vm_host_ansible").get("ansible_sudo_pass") + "ansible_become_pass": veos.get("vm_host_ansible").get("ansible_become_pass") } with open(outfile, "w") as toWrite: toWrite.write("---\n") yaml.dump(result, stream=toWrite, default_flow_style=False) """ -makeSonicLabDevices(data, outfile) +makeSonicLabDevices(data, outfile) @:parameter data - the dictionary to 
look through (devices dictionary) @:parameter outfile - the file to write to generates files/sonic_lab_devices.csv by pulling hostname, managementIP, hwsku, and type @@ -190,14 +192,14 @@ def makeSonicLabDevices(data, outfile): """ -makeTestbed(data, outfile) +makeTestbed(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) @:parameter outfile - the file to write to -generates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, ptf_ip, server, vm_base, dut, and comment +generates /testbed.csv by pulling confName, groupName, topo, ptf_image_name, ptf_ip, ptf_ipv6, server, vm_base, dut, and comment error handling: checks if attribute values are None type or string "None" """ def makeTestbed(data, outfile): - csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment" + csv_columns = "# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment" topology = data csv_file = outfile @@ -210,9 +212,11 @@ def makeTestbed(data, outfile): topo = groupDetails.get("topo") ptf_image_name = groupDetails.get("ptf_image_name") ptf_ip = groupDetails.get("ptf_ip") + ptf_ipv6 = groupDetails.get("ptf_ipv6") server = groupDetails.get("server") vm_base = groupDetails.get("vm_base") dut = groupDetails.get("dut") + ptf = groupDetails.get("ptf") comment = groupDetails.get("comment") # catch empty types @@ -224,16 +228,20 @@ def makeTestbed(data, outfile): ptf_image_name = "" if not ptf_ip: ptf_ip = "" + if not ptf_ipv6: + ptf_ipv6 = "" if not server: server = "" if not vm_base: vm_base = "" if not dut: dut = "" + if not ptf: + ptf = "" if not comment: comment = "" - row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf_ip + "," + server + "," + vm_base + "," + dut + "," + comment + row = confName + "," + groupName + "," + topo + "," + ptf_image_name + "," + ptf + "," + ptf_ip + "," + ptf_ipv6 + ","+ server + "," + vm_base + "," + dut + "," + comment 
f.write(row + "\n") except IOError: print("I/O error: issue creating testbed.csv") @@ -242,9 +250,9 @@ def makeTestbed(data, outfile): """ makeSonicLabLinks(data, outfile) @:parameter data - the dictionary to look through (devices dictionary) -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to generates /files/sonic_lab_links.csv by pulling startPort, endPort, bandWidth, vlanID, vlanMode -error handling: checks if attribute values are None type or string "None" +error handling: checks if attribute values are None type or string "None" """ def makeSonicLabLinks(data, outfile): csv_columns = "StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode" @@ -305,7 +313,7 @@ def makeEOSCreds(data, outfile): """ makeFanout_secrets(data, outfile) @:parameter data - reads from devices dictionary -@:parameter outfile - the file to write to +@:parameter outfile - the file to write to Makes /group_vars/fanout/secrets.yml Finds the fanout secret credentials by using "fanout" as the value to search for under device_type Under github and personal topology configuration, there is only one designated fanout switch credential @@ -425,7 +433,7 @@ def makeLab(data, devices, testbed, outfile): """ makeVeos(data, veos, devices, outfile) @:parameter data - reads from either veos-groups, this helps separate the function into 3 components; children, host, vars -@:parameter veos - reads from either veos +@:parameter veos - reads from either veos @:parameter devices - reads from devices @:parameter outfile - writes to veos """ @@ -484,18 +492,16 @@ def makeHostVar(data): """ updateDockerRegistry -@:parameter outfile - the file to write to -hard codes the docker registry to search locally rather than externally +@:parameter outfile - the file to write to +hard codes the docker registry to search locally rather than externally """ def updateDockerRegistry(docker_registry, outfile): - if (not docker_registry.get("docker_registry_host")) or (not 
docker_registry.get("docker_registry_username")) or (not docker_registry.get("docker_registry_password")): + if not docker_registry.get("docker_registry_host"): print("\t\tREGISTRY FIELD BLANK - SKIPPING THIS STEP") else: with open(outfile, "w") as toWrite: toWrite.write("docker_registry_host: " + docker_registry.get("docker_registry_host")) toWrite.write("\n\n") - toWrite.write("docker_registry_username: " + docker_registry.get("docker_registry_username") + "\n") - toWrite.write("docker_registry_password: root" + docker_registry.get("docker_registry_password")) def main(): @@ -571,4 +577,4 @@ def main(): if __name__ == '__main__': main() - \ No newline at end of file + diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index 65e6d468e96..0498a331f10 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -20,6 +20,7 @@ # -e topo=t0 - the name of topology to generate minigraph file # -e testbed_name=vms1-1 - the testbed name specified in testbed.csv file # (if you give 'testbed_name' option, will use info from testbed and ignore topo and vm_base options) +# -e vm_file=veos - the virtual machine file name # -e deploy=True - if deploy the newly generated minigraph to the targent DUT, default is false if not defined # -e save=True - if save the newly generated minigraph to the targent DUT as starup-config, default is false if not defined # @@ -40,12 +41,17 @@ testbed_file: testbed.csv when: testbed_file is not defined + - name: Set default dut index + set_fact: + dut_index: 0 + when: dut_index is not defined + - name: Gathering testbed information test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}" - connection: local + delegate_to: localhost - fail: msg="The DUT you are trying to run test does not belongs to this testbed" - when: testbed_facts['dut'] != inventory_hostname + when: testbed_facts['duts'][dut_index] != inventory_hostname - name: set 
testbed_type set_fact: @@ -58,15 +64,21 @@ when: testbed_name is defined - topo_facts: topo={{ topo }} - connection: local + delegate_to: localhost + + - name: set default vm file path + set_fact: + vm_file: veos + when: vm_file is not defined - set_fact: VM_topo: "{% if 'ptf' in topo %}False{% else %}True{% endif %}" remote_dut: "{{ ansible_ssh_host }}" - name: gather testbed VM informations - testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} - connection: local + # testbed_vm_info: base_vm={{ testbed_facts['vm_base'] }} topo={{ testbed_facts['topo'] }} vm_file={{ vm_file }} + testbed_vm_info: base_vm="{{vm_base}}" topo="{{topo}}" + delegate_to: localhost when: "VM_topo | bool" - name: find interface name mapping and individual interface speed if defined @@ -80,29 +92,29 @@ set_fact: vlan_intfs: "{{ vlan_intfs|default([])}} + ['{{ port_alias[item] }}' ]" with_items: "{{ host_if_indexes }}" - when: ("'host_interfaces' in vm_topo_config") and ("'tor' in vm_topo_config['dut_type'] | lower") + when: "('host_interfaces' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)" - name: find all interface indexes mapping connecting to VM set_fact: interface_to_vms: "{{ interface_to_vms|default({}) | combine({ item.key: item.value['interface_indexes'] }) }}" - with_dict: vm_topo_config['vm'] + with_dict: "{{ vm_topo_config['vm'] }}" - name: find all interface indexes connecting to VM set_fact: ifindex_to_vms: "{{ ifindex_to_vms|default([]) }} + {{ item.value['interface_indexes']}}" - with_dict: vm_topo_config['vm'] + with_dict: "{{ vm_topo_config['vm'] }}" - name: find all interface names set_fact: intf_names: "{{ intf_names | default({}) | combine({item.key: port_alias[item.value[0]|int:item.value[-1]|int+1] }) }}" - with_dict: interface_to_vms + with_dict: "{{ interface_to_vms }}" - name: create minigraph file in ansible minigraph folder template: src=templates/minigraph_template.j2 dest=minigraph/{{ 
inventory_hostname}}.{{ topo }}.xml - connection: local + delegate_to: localhost when: local_minigraph is defined and local_minigraph|bool == true - + - block: - name: Init telemetry keys set_fact: @@ -169,7 +181,6 @@ -out "{{ dsmsroot_cer }}" become: true - - block: - name: saved original minigraph file in SONiC DUT(ignore errors when file doesnot exist) shell: mv /etc/sonic/minigraph.xml /etc/sonic/minigraph.xml.orig @@ -188,8 +199,8 @@ delegate_to: localhost - name: debug print stat_result - debug: - msg: Stat result is {{ stat_result }} + debug: + msg: Stat result is {{ stat_result }} - name: Copy corresponding configlet files if exist copy: src=vars/configlet/{{ topo }}/ @@ -258,12 +269,20 @@ regexp: '^enabled=' line: 'enabled=false' become: true + register: updategraph_conf - name: restart automatic minigraph update service become: true service: name: updategraph state: restarted + when: updategraph_conf.changed + + - name: docker status + shell: docker ps + register: docker_status + + - debug: msg={{ docker_status.stdout_lines }} - name: execute cli "config load_minigraph -y" to apply new minigraph become: true diff --git a/ansible/fanout_connect.yml b/ansible/fanout_connect.yml index d6020d6852c..2406bf0d41f 100644 --- a/ansible/fanout_connect.yml +++ b/ansible/fanout_connect.yml @@ -25,5 +25,5 @@ - set_fact: connect_leaf=false - - include: roles/fanout/tasks/rootfanout_connect.yml - when: external_port is defined +# - include: roles/fanout/tasks/rootfanout_connect.yml +# when: external_port is defined diff --git a/ansible/files/creategraph.py b/ansible/files/creategraph.py index 3ca637f89ea..41482d77225 100755 --- a/ansible/files/creategraph.py +++ b/ansible/files/creategraph.py @@ -63,7 +63,8 @@ def generate_dpg(self): for dev in self.devices: hostname = dev.get('Hostname', '') managementip = dev.get('ManagementIp', '') - if hostname and 'fanout' in dev['Type'].lower(): + devtype = dev['Type'].lower() + if hostname and ('fanout' in devtype or 
'ixiachassis' in devtype): ###### Build Management interface IP here, if we create each device indivial minigraph file, we may comment this out l3inforoot = etree.SubElement(self.dpgroot, 'DevicesL3Info', {'Hostname': hostname}) etree.SubElement(l3inforoot, 'ManagementIPInterface', {'Name': 'ManagementIp', 'Prefix': managementip}) diff --git a/ansible/files/sonic_lab_devices.csv b/ansible/files/sonic_lab_devices.csv index b1c94cf022e..8e8201bb13e 100644 --- a/ansible/files/sonic_lab_devices.csv +++ b/ansible/files/sonic_lab_devices.csv @@ -1,10 +1,9 @@ - +Hostname,ManagementIp,HwSku,Type cel-e1031-01,10.250.0.100/23,Celestica-E1031-T48S4,DevSonic cel-seastone-01,10.251.0.100/23,Celestica-DX010-C32,DevSonic cel-seastone-02,10.250.0.100/23,Seastone-DX010-10-50,DevSonic cel-seastone-03,10.250.0.100/23,Seastone-DX010-50,DevSonic e1031-fanout,10.250.0.235/23,Celestica-E1031-T48S4,FanoutLeafSonic -Hostname,ManagementIp,HwSku,Type seastone-fanout,10.251.0.235/23,Celestica-DX010-C32,FanoutLeafSonic str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf diff --git a/ansible/files/sonic_lab_links.csv b/ansible/files/sonic_lab_links.csv index d0a5df28ee7..4fc17810bd9 100644 --- a/ansible/files/sonic_lab_links.csv +++ b/ansible/files/sonic_lab_links.csv @@ -1,3 +1,4 @@ +StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode cel-e1031-01,Ethernet1,e1031-fanout,Ethernet1,1000,100,Access cel-e1031-01,Ethernet2,e1031-fanout,Ethernet2,1000,101,Access cel-e1031-01,Ethernet3,e1031-fanout,Ethernet3,1000,102,Access @@ -61,4 +62,3 @@ cel-seastone-01,Ethernet108,seastone-fanout,Ethernet108,100000,127,Access cel-seastone-01,Ethernet112,seastone-fanout,Ethernet112,100000,128,Access cel-seastone-01,Ethernet116,seastone-fanout,Ethernet116,100000,129,Access cel-seastone-01,Ethernet120,seastone-fanout,Ethernet120,100000,130,Access -StartDevice,StartPort,EndDevice,EndPort,BandWidth,VlanID,VlanMode diff --git 
a/ansible/group_vars/all/creds.yml b/ansible/group_vars/all/creds.yml index 2b4d1c8ea09..d301b63c6dc 100644 --- a/ansible/group_vars/all/creds.yml +++ b/ansible/group_vars/all/creds.yml @@ -2,6 +2,7 @@ eos_default_login: "admin" eos_default_password: "" eos_login: admin eos_password: 123456 +eos_root_user: root eos_root_password: 123456 sonic_login: "admin" diff --git a/ansible/group_vars/vm_host/creds.yml b/ansible/group_vars/vm_host/creds.yml index 069c732ade6..bcf80c488b8 100644 --- a/ansible/group_vars/vm_host/creds.yml +++ b/ansible/group_vars/vm_host/creds.yml @@ -1,5 +1,5 @@ --- ansible_user: clsnet ansible_password: 123456 -ansible_sudo_pass: 123456 +ansible_become_password: 123456 diff --git a/ansible/host_vars/STR-ACS-SERV-03.yml b/ansible/host_vars/STR-ACS-SERV-03.yml new file mode 100644 index 00000000000..8af81ed8629 --- /dev/null +++ b/ansible/host_vars/STR-ACS-SERV-03.yml @@ -0,0 +1,4 @@ +mgmt_bridge: br2 +mgmt_prefixlen: 23 +mgmt_gw: 10.251.0.1 +external_port: enp175s0f0 diff --git a/ansible/lab b/ansible/lab old mode 100644 new mode 100755 index 7ed16ffa498..5b8e71ecc0e --- a/ansible/lab +++ b/ansible/lab @@ -1,5 +1,5 @@ [sonic_slx] -cel-seastone-01 ansible_host=10.251.0.100 +cel-seastone-01 ansible_host=10.251.0.100 pdu_host=pdu-1 [sonic_slx:vars] hwsku="Celestica-DX010-C32" @@ -33,3 +33,6 @@ mgmt_subnet_mask_length="24" seastone-fanout ansible_host=10.251.0.235 os=sonic ansible_ssh_user=admin ansible_ssh_pass=password e1031-fanout ansible_host=10.250.0.235 os=sonic ansible_ssh_user=admin ansible_ssh_pass=password +[pdu] +pdu-1 ansible_host=10.204.112.55 protocol=snmp + diff --git a/ansible/library/bgp_facts.py b/ansible/library/bgp_facts.py index 50be48c73a6..5d757ac63e4 100644 --- a/ansible/library/bgp_facts.py +++ b/ansible/library/bgp_facts.py @@ -48,34 +48,49 @@ class BgpModule(object): def __init__(self): + self.instances =[] self.module = AnsibleModule( argument_spec=dict( + num_npus=dict(type='int', default=1), + 
instance_id=dict(type='int'), ), supports_check_mode=True) + m_args = self.module.params + if 'instance_id' in m_args and m_args['instance_id'] is not None: + self.instances.append("bgp{}".format(m_args['instance_id'])) + else: + npus = m_args['num_npus'] + if npus > 1: + for npu in range(0, npus): + self.instances.append("bgp{}".format(npu)) + else: + self.instances.append("bgp") + self.out = None self.facts = {} - + self.facts['bgp_neighbors'] = {} return def run(self): """ Main method of the class """ - self.collect_data('summary') - self.parse_summary() - self.collect_data('neighbor') - self.parse_neighbors() - self.get_statistics() + for instance in self.instances: + self.collect_data('summary', instance) + self.parse_summary() + self.collect_data('neighbor',instance) + self.parse_neighbors() + self.get_statistics() self.module.exit_json(ansible_facts=self.facts) - def collect_data(self, command_str): + def collect_data(self, command_str, instance): """ Collect bgp information by reading output of 'vtysh' command line tool """ + docker_cmd = 'docker exec -i {} vtysh -c "show ip bgp {}" '.format(instance,command_str) try: - rc, self.out, err = self.module.run_command('docker exec -i bgp vtysh -c "show ip bgp ' + command_str + '"', - executable='/bin/bash', use_unsafe_shell=True) + rc, self.out, err = self.module.run_command(docker_cmd, executable='/bin/bash', use_unsafe_shell=True) except Exception as e: self.module.fail_json(msg=str(e)) @@ -106,6 +121,10 @@ def parse_neighbors(self): regex_conn_dropped = re.compile(r'.*Connections established \d+; dropped (\d+)') regex_peer_group = re.compile(r'.*Member of peer-group (.*) for session parameters') regex_subnet = re.compile(r'.*subnet range group: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\/\d{1,2})') + regex_cap_gr = re.compile(r'.*Graceful Restart Capabilty: (\w+)') + regex_cap_gr_peer_restart_time = re.compile(r'.*Remote Restart timer is (\d+)') + regex_cap_gr_peer_af_ip4 = re.compile(r'.*IPv4 Unicast\((.*)\)') + 
regex_cap_gr_peer_af_ip6 = re.compile(r'.*IPv6 Unicast\((.*)\)') neighbors = {} @@ -117,11 +136,13 @@ def parse_neighbors(self): # ignore empty rows if 'BGP' in n: neighbor = {} + capabilities = {} message_stats = {} n = "BGP neighbor is" + n lines = n.splitlines() neighbor['admin'] = 'up' neighbor['accepted prefixes'] = 0 + neighbor_ip = None for line in lines: if regex_ipv4.match(line): @@ -143,6 +164,11 @@ def parse_neighbors(self): if regex_peer_group.match(line): neighbor['peer group'] = regex_peer_group.match(line).group(1) if regex_subnet.match(line): neighbor['subnet'] = regex_subnet.match(line).group(1) + if regex_cap_gr.match(line): capabilities['graceful restart'] = regex_cap_gr.match(line).group(1).lower() + if regex_cap_gr_peer_restart_time.match(line): capabilities['peer restart timer'] = int(regex_cap_gr_peer_restart_time.match(line).group(1)) + if regex_cap_gr_peer_af_ip4.match(line): capabilities['peer af ipv4 unicast'] = regex_cap_gr_peer_af_ip4.match(line).group(1).lower() + if regex_cap_gr_peer_af_ip6.match(line): capabilities['peer af ipv6 unicast'] = regex_cap_gr_peer_af_ip6.match(line).group(1).lower() + if regex_stats.match(line): try: key, values = line.split(':') @@ -155,15 +181,19 @@ def parse_neighbors(self): except Exception as e: print"NonFatal: line:'{}' should not have matched for sent/rcvd count".format(line) + if capabilities: + neighbor['capabilities'] = capabilities + if message_stats: neighbor['message statistics'] = message_stats - neighbors[neighbor_ip] = neighbor + if neighbor_ip: + neighbors[neighbor_ip] = neighbor except Exception as e: self.module.fail_json(msg=str(e)) - self.facts['bgp_neighbors'] = neighbors + self.facts['bgp_neighbors'].update(neighbors) return def get_statistics(self): diff --git a/ansible/library/conn_graph_facts.py b/ansible/library/conn_graph_facts.py index 0db38699f96..df64b0a8267 100755 --- a/ansible/library/conn_graph_facts.py +++ b/ansible/library/conn_graph_facts.py @@ -17,8 +17,28 @@ Retrive 
lab fanout switches physical and vlan connections add to Ansible facts options: - host: [fanout switch name|Server name|Sonic Switch Name] - requred: True + host: + [fanout switch name|Server name|Sonic Switch Name] + required: False + hosts: + List of hosts. Applicable for multi-DUT and single-DUT setup. The host option for single DUT setup is kept + for backward compatibility. + required: False + anchor: + List of hosts. When no host and hosts is provided, the anchor option must be specified with list of hosts. + This option is to supply the relevant list of hosts for looking up the connection graph xml file which has + all the supplied hosts. The whole graph will be returned when this option is used. This is for configuring + the root fanout switch. + required: False + filepath: + Path of the connection graph xml file. Override the default path for looking up connection graph xml file. + required: False + filename: + Name of the connection graph xml file. Override the behavior of looking up connection graph xml file. When + this option is specified, always use the specified connection graph xml file. 
+ required: False + + Mutually exclusive options: host, hosts, anchor Ansible_facts: device_info: The device(host) type and hwsku @@ -68,9 +88,6 @@ ''' -LAB_CONNECTION_GRAPH_FILE = 'lab_connection_graph.xml' -LAB_GRAPHFILE_PATH = 'files/' - class Parse_Lab_Graph(): """ Parse the generated lab physical connection graph and insert Ansible fact of the graph @@ -188,6 +205,8 @@ def get_host_vlan(self, hostname): self.vlanport[hostname] = {} for port in self.links[hostname]: peerdevice = self.links[hostname][port]['peerdevice'] + if self.devices[peerdevice]["Type"].lower() == "devsonic": + continue peerport = self.links[hostname][port]['peerport'] peerportmode = self.vlanport[peerdevice][peerport]['mode'] peervlanids = self.vlanport[peerdevice][peerport]['vlanids'] @@ -229,42 +248,110 @@ def get_host_connections(self, hostname): else: return self.links + def contains_hosts(self, hostnames): + return set(hostnames) <= set(self.devices) + + +LAB_CONNECTION_GRAPH_FILE = 'graph_files.yml' +EMPTY_GRAPH_FILE = 'empty_graph.xml' +LAB_GRAPHFILE_PATH = 'files/' + +""" + Find a graph file contains all devices in testbed. + duts are spcified by hostnames + + Parameters: + hostnames: list of duts in the target testbed. +""" +def find_graph(hostnames): + filename = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE) + with open(filename) as fd: + file_list = yaml.safe_load(fd) + + # Finding the graph file contains all duts from hostnames, + for fn in file_list: + filename = os.path.join(LAB_GRAPHFILE_PATH, fn) + lab_graph = Parse_Lab_Graph(filename) + lab_graph.parse_graph() + if lab_graph.contains_hosts(hostnames): + return lab_graph + + # Fallback to return an empty connection graph, this is + # needed to bridge the kvm test needs. The KVM test needs + # A graph file, which used to be whatever hardcoded file. + # Here we provide one empty file for the purpose. 
+ lab_graph = Parse_Lab_Graph(os.path.join(LAB_GRAPHFILE_PATH, EMPTY_GRAPH_FILE)) + lab_graph.parse_graph() + return lab_graph + def main(): module = AnsibleModule( argument_spec=dict( host=dict(required=False), + hosts=dict(required=False, type='list'), filename=dict(required=False), + filepath=dict(required=False), + anchor=dict(required=False, type='list'), ), + mutually_exclusive=[['host', 'hosts', 'anchor']], supports_check_mode=True ) m_args = module.params - hostname = m_args['host'] + + hostnames = m_args['hosts'] + anchor = m_args['anchor'] + if not hostnames: + hostnames = [m_args['host']] try: + # When called by pytest, the file path is obscured to /tmp/.../. + # we need the caller to tell us where the graph files are with + # filepath argument. + if m_args['filepath']: + global LAB_GRAPHFILE_PATH + LAB_GRAPHFILE_PATH = m_args['filepath'] + if m_args['filename']: - filename = m_args['filename'] + filename = os.path.join(LAB_GRAPHFILE_PATH, m_args['filename']) + lab_graph = Parse_Lab_Graph(filename) + lab_graph.parse_graph() else: - filename = LAB_GRAPHFILE_PATH + LAB_CONNECTION_GRAPH_FILE - lab_graph = Parse_Lab_Graph(filename) - lab_graph.parse_graph() - dev = lab_graph.get_host_device_info(hostname) - if dev is None: - module.fail_json(msg="cannot find info for "+hostname) - results = {} - results['device_info'] = lab_graph.get_host_device_info(hostname) - results['device_conn'] = lab_graph.get_host_connections(hostname) - if lab_graph.get_host_vlan(hostname): - results['device_vlan_range'] = lab_graph.get_host_vlan(hostname)['VlanRange'] - results['device_vlan_list'] = lab_graph.get_host_vlan(hostname)['VlanList'] - results['device_port_vlans'] = lab_graph.get_host_port_vlans(hostname) + # When calling passed in anchor instead of hostnames, + # the caller is asking to return the whole graph. This + # is needed when configuring the root fanout switch. 
+ target = anchor if anchor else hostnames + lab_graph = find_graph(target) + + device_info = [] + device_conn = [] + device_port_vlans = [] + device_vlan_range = [] + device_vlan_list = [] + for hostname in hostnames: + dev = lab_graph.get_host_device_info(hostname) + if dev is None: + module.fail_json(msg="cannot find info for %s" % hostname) + device_info.append(dev) + device_conn.append(lab_graph.get_host_connections(hostname)) + host_vlan = lab_graph.get_host_vlan(hostname) + # for multi-DUTs, must ensure all have vlan configured. + if host_vlan: + device_vlan_range.append(host_vlan["VlanRange"]) + device_vlan_list.append(host_vlan["VlanList"]) + device_port_vlans.append(lab_graph.get_host_port_vlans(hostname)) + results = {k: v for k, v in locals().items() + if (k.startswith("device_") and v)} + + # flatten the lists for single host + if m_args['hosts'] is None: + results = {k: v[0] for k, v in results.items()} + module.exit_json(ansible_facts=results) except (IOError, OSError): - module.fail_json(msg="Can not find lab graph file "+LAB_CONNECTION_GRAPH_FILE) + module.fail_json(msg="Can not find lab graph file under {}".format(LAB_GRAPHFILE_PATH)) except Exception as e: module.fail_json(msg=traceback.format_exc()) from ansible.module_utils.basic import * -if __name__== "__main__": +if __name__ == "__main__": main() - - diff --git a/ansible/library/docker.py b/ansible/library/docker.py index 545bbd592e6..9c40ca195ef 100644 --- a/ansible/library/docker.py +++ b/ansible/library/docker.py @@ -487,6 +487,8 @@ def _human_to_bytes(number): if isinstance(number, int): return number + if isinstance(number, str) and number.isdigit(): + return int(number) if number[-1] == suffixes[0] and number[-2].isdigit(): return number[:-1] diff --git a/ansible/library/extract_log.py b/ansible/library/extract_log.py index 032771ef51f..b80af9e4a6d 100644 --- a/ansible/library/extract_log.py +++ b/ansible/library/extract_log.py @@ -80,8 +80,6 @@ from datetime import datetime from 
ansible.module_utils.basic import * -from pprint import pprint - def extract_lines(directory, filename, target_string): path = os.path.join(directory, filename) @@ -96,10 +94,12 @@ def extract_lines(directory, filename, target_string): # been '\x00's in front of the log entry timestamp which # messes up with the comparator. # Prehandle lines to remove these sub-strings - result = [(filename, line.replace('\x00', '')) for line in file if target_string in line and 'nsible' not in line] + dt = datetime.datetime.fromtimestamp(os.path.getctime(path)) + result = [(filename, dt, line.replace('\x00', '')) for line in file if target_string in line and 'nsible' not in line] return result + def extract_number(s): """Extracts number from string, if not number found returns 0""" ns = re.findall(r'\d+', s) @@ -109,19 +109,30 @@ def extract_number(s): return int(ns[0]) -def convert_date(s): +def convert_date(fct, s): dt = None re_result = re.findall(r'^\S{3}\s{1,2}\d{1,2} \d{2}:\d{2}:\d{2}\.?\d*', s) + # Workaround for pytest-ansible + loc = locale.getlocale() + locale.setlocale(locale.LC_ALL, (None, None)) + if len(re_result) > 0: - str_date = re_result[0] + str_date = '{:04d} '.format(fct.year) + re_result[0] try: - dt = datetime.strptime(str_date, '%b %d %X.%f') + dt = datetime.datetime.strptime(str_date, '%Y %b %d %X.%f') except ValueError: - dt = datetime.strptime(str_date, '%b %d %X') + dt = datetime.datetime.strptime(str_date, '%Y %b %d %X') + # Handle the wrap around of year (Dec 31 to Jan 1) + # Generally, last metadata change time should be larger than generated log message timestamp + # but we still perform some wrap around test to avoid the race condition + # 183 is the number of days in half year, just a reasonable choice + if (dt - fct).days > 183: + dt.replace(year = dt.year - 1) else: re_result = re.findall(r'^\d{4}-\d{2}-\d{2}\.\d{2}:\d{2}:\d{2}\.\d{6}', s) str_date = re_result[0] - dt = datetime.strptime(str_date, '%Y-%m-%d.%X.%f') + dt = 
datetime.datetime.strptime(str_date, '%Y-%m-%d.%X.%f') + locale.setlocale(locale.LC_ALL, loc) return dt @@ -130,8 +141,8 @@ def comparator(l, r): nl = extract_number(l[0]) nr = extract_number(r[0]) if nl == nr: - dl = convert_date(l[1]) - dr = convert_date(r[1]) + dl = convert_date(l[1], l[2]) + dr = convert_date(r[1], r[2]) if dl == dr: return 0 elif dl < dr: @@ -219,7 +230,7 @@ def combine_logs_and_save(directory, filenames, start_string, target_filename): def extract_log(directory, prefixname, target_string, target_filename): filenames = list_files(directory, prefixname) - file_with_latest_line, latest_line = extract_latest_line_with_string(directory, filenames, target_string) + file_with_latest_line, file_create_time, latest_line = extract_latest_line_with_string(directory, filenames, target_string) files_to_copy = calculate_files_to_copy(filenames, file_with_latest_line) combine_logs_and_save(directory, files_to_copy, latest_line, target_filename) diff --git a/ansible/library/interface_facts.py b/ansible/library/interface_facts.py index 83729d820fe..eb48e9dce49 100644 --- a/ansible/library/interface_facts.py +++ b/ansible/library/interface_facts.py @@ -108,7 +108,7 @@ def main(): module = AnsibleModule( argument_spec=dict( ip_path=dict(required=False, default="/sbin/ip"), - up_ports=dict(default={}), + up_ports=dict(type='raw', default={}), ), supports_check_mode=False) @@ -301,7 +301,7 @@ def parse_ip_output(module, output, secondary=False): down_ports += [name] except: down_ports += [name] - pass + pass results['ansible_interface_facts'] = interfaces results['ansible_interface_ips'] = ips diff --git a/ansible/library/lldp_facts.py b/ansible/library/lldp_facts.py index 010a9d03d3e..61d6670f9e6 100644 --- a/ansible/library/lldp_facts.py +++ b/ansible/library/lldp_facts.py @@ -57,7 +57,7 @@ EXAMPLES = ''' # Gather LLDP facts with SNMP version 2 - snmp_facts: host={{ inventory_hostname }} version=2c community=public - connection: local + delegate_to: localhost # 
Gather LLDP facts using SNMP version 3 - lldp_facts: @@ -142,7 +142,7 @@ def main(): supports_check_mode=False) m_args = module.params - + if not has_pysnmp: module.fail_json(msg='Missing required pysnmp module (check docs)') @@ -152,14 +152,14 @@ def main(): if m_args['version'] == "v2" or m_args['version'] == "v2c": if not m_args['community']: module.fail_json(msg='Community not set when using snmp version 2') - + if m_args['version'] == "v3": if m_args['username'] is None: module.fail_json(msg='Username not set when using snmp version 3') if m_args['level'] == "authPriv" and m_args['privacy'] == None: module.fail_json(msg='Privacy algorithm not set when using authPriv') - + if m_args['integrity'] == "sha": integrity_proto = cmdgen.usmHMACSHAAuthProtocol elif m_args['integrity'] == "md5": @@ -169,7 +169,7 @@ def main(): privacy_proto = cmdgen.usmAesCfb128Protocol elif m_args['privacy'] == "des": privacy_proto = cmdgen.usmDESPrivProtocol - + # Use SNMP Version 2 if m_args['version'] == "v2" or m_args['version'] == "v2c": snmp_auth = cmdgen.CommunityData(m_args['community']) @@ -188,7 +188,7 @@ def main(): v = DefineOid(dotprefix=False) Tree = lambda: defaultdict(Tree) - + results = Tree() host = m_args['host'] @@ -222,9 +222,9 @@ def main(): lldp_rem_port_desc = dict() lldp_rem_chassis_id = dict() lldp_rem_sys_desc = dict() - + vbd = [] - + for var_binds in var_table: for oid, val in var_binds: current_oid = oid.prettyPrint() @@ -233,7 +233,7 @@ def main(): vbd.append(current_val) try: - if_name = inverse_if_table[str(current_oid.split(".")[-2])] + if_name = inverse_if_table[str(current_oid.split(".")[-2])] except Exception as e: print json.dumps({ "unbound_interface_index": str(current_oid.split(".")[-2]) @@ -259,10 +259,10 @@ def main(): lldp_data = dict() for intf in lldp_rem_sys.viewkeys(): - lldp_data[intf] = {'neighbor_sys_name': lldp_rem_sys[intf], - 'neighbor_port_desc': lldp_rem_port_desc[intf], - 'neighbor_port_id': lldp_rem_port_id[intf], - 
'neighbor_sys_desc': lldp_rem_sys_desc[intf], + lldp_data[intf] = {'neighbor_sys_name': lldp_rem_sys[intf], + 'neighbor_port_desc': lldp_rem_port_desc[intf], + 'neighbor_port_id': lldp_rem_port_id[intf], + 'neighbor_sys_desc': lldp_rem_sys_desc[intf], 'neighbor_chassis_id': lldp_rem_chassis_id[intf]} diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py index 6981a4acf63..15f4eeaad8d 100644 --- a/ansible/library/minigraph_facts.py +++ b/ansible/library/minigraph_facts.py @@ -213,6 +213,9 @@ def parse_dpg(dpg, hname): pcmbr_list[i] = port_alias_to_name_map[member] ports[port_alias_to_name_map[member]] = {'name': port_alias_to_name_map[member], 'alias': member} pcs[pcintfname] = {'name': pcintfname, 'members': pcmbr_list} + fallback_node = pcintf.find(str(QName(ns, "Fallback"))) + if fallback_node is not None: + pcs[pcintfname]['fallback'] = fallback_node.text ports.pop(pcintfname) vlanintfs = child.find(str(QName(ns, "VlanInterfaces"))) @@ -467,7 +470,7 @@ def parse_xml(filename, hostname): elif hwsku == "Arista-7260CX3-C64" or hwsku == "Arista-7170-64C": for i in range(1, 65): port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4) - elif hwsku == "Arista-7060CX-32S-C32" or hwsku == "Arista-7060CX-32S-C32-T1": + elif hwsku == "Arista-7060CX-32S-C32" or hwsku == "Arista-7060CX-32S-Q32" or hwsku == "Arista-7060CX-32S-C32-T1": for i in range(1, 33): port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4) elif hwsku == "Mellanox-SN2700-D48C8": @@ -531,6 +534,12 @@ def parse_xml(filename, hostname): elif hwsku == "Celestica-E1031-T48S4": for i in range(1, 53): port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % ((i - 1)) + elif hwsku == "et6448m": + for i in range(0, 52): + port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i + elif hwsku == "newport": + for i in range(0, 256, 8): + port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i else: for i in range(0, 128, 4): 
port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i diff --git a/ansible/library/port_alias.py b/ansible/library/port_alias.py index 7a74a090d0a..289d00de0f1 100755 --- a/ansible/library/port_alias.py +++ b/ansible/library/port_alias.py @@ -42,30 +42,51 @@ PORTMAP_FILE = 'port_config.ini' ALLOWED_HEADER = ['name', 'lanes', 'alias', 'index', 'speed'] +MACHINE_CONF = '/host/machine.conf' +ONIE_PLATFORM_KEY = 'onie_platform' +ABOOT_PLATFORM_KEY = 'aboot_platform' + +KVM_PLATFORM = 'x86_64-kvm_x86_64-r0' + class SonicPortAliasMap(): """ Retrieve SONiC device interface port alias mapping and port speed if they are definded """ def __init__(self, hwsku): - self.filename = '' self.hwsku = hwsku return - def findfile(self): - for (rootdir, dirnames, filenames) in os.walk(FILE_PATH, followlinks=True): - if self.hwsku == rootdir.split('/')[-1] and len(dirnames) == 0 and PORTMAP_FILE in filenames: - self.filename = rootdir + '/' + PORTMAP_FILE + def get_platform_type(self): + if not os.path.exists(MACHINE_CONF): + return KVM_PLATFORM + with open(MACHINE_CONF) as machine_conf: + for line in machine_conf: + tokens = line.split('=') + key = tokens[0].strip() + value = tokens[1].strip() + if key == ONIE_PLATFORM_KEY or key == ABOOT_PLATFORM_KEY: + return value + return None + + def get_portconfig_path(self): + platform = self.get_platform_type() + if platform is None: + return None + portconfig = os.path.join(FILE_PATH, platform, self.hwsku, PORTMAP_FILE) + if os.path.exists(portconfig): + return portconfig + return None def get_portmap(self): aliases = [] portmap = {} aliasmap = {} portspeed = {} - self.findfile() - if self.filename == '': + filename = self.get_portconfig_path() + if filename is None: raise Exception("Something wrong when trying to find the portmap file, either the hwsku is not available or file location is not correct") - with open(self.filename) as f: + with open(filename) as f: lines = f.readlines() alias_index = -1 speed_index = -1 @@ -91,7 +112,7 @@ 
def get_portmap(self): aliases.append(alias) portmap[name] = alias aliasmap[alias] = name - if speed_index != -1: + if (speed_index != -1) and (len(mapping) > speed_index): portspeed[alias] = mapping[speed_index] return (aliases, portmap, aliasmap, portspeed) diff --git a/ansible/library/reduce_and_add_sonic_images.py b/ansible/library/reduce_and_add_sonic_images.py index f2bb7e5de6c..9d8fd6c2f7b 100644 --- a/ansible/library/reduce_and_add_sonic_images.py +++ b/ansible/library/reduce_and_add_sonic_images.py @@ -26,6 +26,8 @@ from os import path from ansible.module_utils.basic import * +results = {"downloaded_image_version": "Unknown"} + def exec_command(module, cmd, ignore_error=False, msg="executing command"): rc, out, err = module.run_command(cmd) if not ignore_error and rc != 0: @@ -45,30 +47,43 @@ def get_disk_free_size(module, partition): def reduce_installed_sonic_images(module, disk_used_pcent): exec_command(module, cmd="sonic_installer cleanup -y", ignore_error=True) +def download_new_sonic_image(module, new_image_url, save_as): + global results + if not new_image_url: + return + exec_command(module, + cmd="curl -o {} {}".format(save_as, new_image_url), + msg="downloading new image") + if path.exists(save_as): + results['downloaded_image_version'] = exec_command(module, + cmd="sonic_installer binary_version %s" % save_as + ).rstrip('\n') def install_new_sonic_image(module, new_image_url): if not new_image_url: return avail = get_disk_free_size(module, "/host") - if avail >= 1500: + if avail >= 2000: # There is enough space to install directly + save_as = "/host/downloaded-sonic-image" + download_new_sonic_image(module, new_image_url, save_as) exec_command(module, - cmd="sonic_installer install %s -y" % new_image_url, + cmd="sonic_installer install {} -y".format(save_as), msg="installing new image") + exec_command(module, cmd="rm -f {}".format(save_as)) else: # Create a tmpfs partition to download image to install exec_command(module, cmd="mkdir -p 
/tmp/tmpfs", ignore_error=True) exec_command(module, cmd="umount /tmp/tmpfs", ignore_error=True) exec_command(module, - cmd="mount -t tmpfs -o size=1000M tmpfs /tmp/tmpfs", + cmd="mount -t tmpfs -o size=1300M tmpfs /tmp/tmpfs", msg="mounting tmpfs") + save_as = "/tmp/tmpfs/downloaded-sonic-image" + download_new_sonic_image(module, new_image_url, save_as) exec_command(module, - cmd="curl -o /tmp/tmpfs/sonic-image %s" % new_image_url, - msg="downloading new image") - exec_command(module, - cmd="sonic_installer install /tmp/tmpfs/sonic-image -y", + cmd="sonic_installer install {} -y".format(save_as), msg="installing new image") exec_command(module, cmd="sync", ignore_error=True) @@ -100,7 +115,7 @@ def main(): err = str(sys.exc_info()) module.fail_json(msg="Error: %s" % err) - module.exit_json() + module.exit_json(ansible_facts=results) if __name__ == '__main__': main() diff --git a/ansible/library/sensors_facts.py b/ansible/library/sensors_facts.py index a6a7e9e99b5..448968c5ab1 100644 --- a/ansible/library/sensors_facts.py +++ b/ansible/library/sensors_facts.py @@ -1,5 +1,5 @@ #!/usr/bin/python - +import re import subprocess from ansible.module_utils.basic import * @@ -151,14 +151,19 @@ def get_raw_value(self, path): ''' keys = path.split('/') - cur_value = self.raw + cur_values = self.raw + res = None for key in keys: - if key in cur_value: - cur_value = cur_value[key] - else: + pattern = re.compile(key) + for cur_value in cur_values.keys(): + res = re.match(pattern, cur_value) + if res is not None: + cur_values = cur_values[res.group()] + break + if res is None: return None - return cur_value + return cur_values def check_alarms(self): ''' diff --git a/ansible/library/show_interface.py b/ansible/library/show_interface.py index 01366f5556b..ccd06e0b715 100644 --- a/ansible/library/show_interface.py +++ b/ansible/library/show_interface.py @@ -153,7 +153,7 @@ def collect_interface_status(self): return def collect_interface_counter(self): - regex_int = 
re.compile(r'(\S+)\s+(\w)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)') + regex_int = re.compile(r'\s*(\S+)\s+(\w)\s+([,\d]+)\s+(N\/A|[.0-9]+ B/s)\s+(\S+)\s+([,\d]+)\s+(\S+)\s+([,\d]+)\s+([,\d]+)\s+(N\/A|[.0-9]+ B/s)\s+(\S+)\s+([,\d]+)\s+(\S+)\s+([,\d]+)') self.int_counter = {} try: rc, self.out, err = self.module.run_command('show interface counter', executable='/bin/bash', use_unsafe_shell=True) diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py index 5a2381037df..51488757c40 100644 --- a/ansible/library/snmp_facts.py +++ b/ansible/library/snmp_facts.py @@ -44,7 +44,11 @@ required: false is_dell: description: - - Whether the bos is dell or not + - Whether the nos is dell or not + required: false + is_eos: + description: + - Whether the nos is eos or not required: false level: description: @@ -167,23 +171,17 @@ def __init__(self,dotprefix=False): self.lldpLocSysDesc = dp + "1.0.8802.1.1.2.1.3.4" # From LLDP-MIB: lldpLocPortTable - self.lldpLocPortNum = dp + "1.0.8802.1.1.2.1.3.7.1.1" # + .ifindex self.lldpLocPortIdSubtype = dp + "1.0.8802.1.1.2.1.3.7.1.2" # + .ifindex self.lldpLocPortId = dp + "1.0.8802.1.1.2.1.3.7.1.3" # + .ifindex self.lldpLocPortDesc = dp + "1.0.8802.1.1.2.1.3.7.1.4" # + .ifindex # From LLDP-MIB: lldpLocManAddrTables - self.lldpLocManAddrSubtype = dp + "1.0.8802.1.1.2.1.3.8.1.1" # + .subtype + .man addr - self.lldpLocManAddr = dp + "1.0.8802.1.1.2.1.3.8.1.2" # + .subtype + .man addr self.lldpLocManAddrLen = dp + "1.0.8802.1.1.2.1.3.8.1.3" # + .subtype + .man addr self.lldpLocManAddrIfSubtype = dp + "1.0.8802.1.1.2.1.3.8.1.4" # + .subtype + .man addr self.lldpLocManAddrIfId = dp + "1.0.8802.1.1.2.1.3.8.1.5" # + .subtype + .man addr self.lldpLocManAddrOID = dp + "1.0.8802.1.1.2.1.3.8.1.6" # + .subtype + .man addr # From LLDP-MIB: lldpRemTable - self.lldpRemTimeMark = dp + "1.0.8802.1.1.2.1.4.1.1.1" # + .time mark + .ifindex + .rem index - self.lldpRemLocalPortNum = dp + 
"1.0.8802.1.1.2.1.4.1.1.2" # + .time mark + .ifindex + .rem index - self.lldpRemIndex = dp + "1.0.8802.1.1.2.1.4.1.1.3" # + .time mark + .ifindex + .rem index self.lldpRemChassisIdSubtype = dp + "1.0.8802.1.1.2.1.4.1.1.4" # + .time mark + .ifindex + .rem index self.lldpRemChassisId = dp + "1.0.8802.1.1.2.1.4.1.1.5" # + .time mark + .ifindex + .rem index self.lldpRemPortIdSubtype = dp + "1.0.8802.1.1.2.1.4.1.1.6" # + .time mark + .ifindex + .rem index @@ -195,8 +193,6 @@ def __init__(self,dotprefix=False): self.lldpRemSysCapEnabled = dp + "1.0.8802.1.1.2.1.4.1.1.12" # + .time mark + .ifindex + .rem index # From LLDP-MIB: lldpRemManAddrTable - self.lldpRemManAddrSubtype = dp + "1.0.8802.1.1.2.1.4.2.1.1" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr - self.lldpRemManAddr = dp + "1.0.8802.1.1.2.1.4.2.1.2" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr self.lldpRemManAddrIfSubtype = dp + "1.0.8802.1.1.2.1.4.2.1.3" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr self.lldpRemManAddrIfId = dp + "1.0.8802.1.1.2.1.4.2.1.4" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr self.lldpRemManAddrOID = dp + "1.0.8802.1.1.2.1.4.2.1.5" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr @@ -204,6 +200,10 @@ def __init__(self,dotprefix=False): # From Dell Private MIB self.ChStackUnitCpuUtil5sec = dp + "1.3.6.1.4.1.6027.3.10.1.2.9.1.2.1" + # Memory Check + self.sysTotalMemery = dp + "1.3.6.1.4.1.2021.4.5.0" + self.sysTotalFreeMemery = dp + "1.3.6.1.4.1.2021.4.6.0" + # From Cisco private MIB (PFC and queue counters) self.cpfcIfRequests = dp + "1.3.6.1.4.1.9.9.813.1.1.1.1" # + .ifindex self.cpfcIfIndications = dp + "1.3.6.1.4.1.9.9.813.1.1.1.2" # + .ifindex @@ -295,6 +295,7 @@ def main(): authkey=dict(required=False), privkey=dict(required=False), is_dell=dict(required=False, default=False, type='bool'), + is_eos=dict(required=False, default=False, type='bool'), 
removeplaceholder=dict(required=False)), required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],), supports_check_mode=False) @@ -375,6 +376,7 @@ def main(): cmdgen.MibVariable(p.sysContact,), cmdgen.MibVariable(p.sysName,), cmdgen.MibVariable(p.sysLocation,), + lookupMib=False, lexicographicMode=False ) if errorIndication: @@ -408,6 +410,7 @@ def main(): cmdgen.MibVariable(p.ipAdEntIfIndex,), cmdgen.MibVariable(p.ipAdEntNetMask,), cmdgen.MibVariable(p.ifAlias,), + lookupMib=False, lexicographicMode=False ) if errorIndication: @@ -432,7 +435,7 @@ def main(): if v.ifMtu in current_oid: ifIndex = int(current_oid.rsplit('.', 1)[-1]) results['snmp_interfaces'][ifIndex]['mtu'] = current_val - if v.ifMtu in current_oid: + if v.ifSpeed in current_oid: ifIndex = int(current_oid.rsplit('.', 1)[-1]) results['snmp_interfaces'][ifIndex]['speed'] = current_val if v.ifPhysAddress in current_oid: @@ -472,6 +475,7 @@ def main(): cmdgen.MibVariable(p.ifHCOutOctets,), cmdgen.MibVariable(p.ifInUcastPkts,), cmdgen.MibVariable(p.ifOutUcastPkts,), + lookupMib=False, lexicographicMode=False ) if errorIndication: @@ -609,6 +613,7 @@ def main(): snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), cmdgen.MibVariable(p.ChStackUnitCpuUtil5sec,), + lookupMib=False, lexicographicMode=False ) if errorIndication: @@ -647,7 +652,6 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.lldpLocPortNum,), cmdgen.MibVariable(p.lldpLocPortIdSubtype,), cmdgen.MibVariable(p.lldpLocPortId,), cmdgen.MibVariable(p.lldpLocPortDesc,), @@ -660,9 +664,6 @@ def main(): for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.lldpLocPortNum in current_oid: - ifIndex = int(current_oid.rsplit('.', 1)[-1]) - results['snmp_interfaces'][ifIndex]['lldpLocPortNum'] = current_val if v.lldpLocPortIdSubtype in current_oid: ifIndex = 
int(current_oid.rsplit('.', 1)[-1]) results['snmp_interfaces'][ifIndex]['lldpLocPortIdSubtype'] = current_val @@ -676,8 +677,6 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.lldpLocManAddrSubtype,), - cmdgen.MibVariable(p.lldpLocManAddr,), cmdgen.MibVariable(p.lldpLocManAddrLen,), cmdgen.MibVariable(p.lldpLocManAddrIfSubtype,), cmdgen.MibVariable(p.lldpLocManAddrIfId,), @@ -691,12 +690,6 @@ def main(): for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.lldpLocManAddrSubtype in current_oid: - address = '.'.join(current_oid.split('.')[13:]) - results['snmp_lldp']['lldpLocManAddrSubtype'] = current_val - if v.lldpLocManAddr in current_oid: - address = '.'.join(current_oid.split('.')[13:]) - results['snmp_lldp']['lldpLocManAddr'] = current_val if v.lldpLocManAddrLen in current_oid: address = '.'.join(current_oid.split('.')[13:]) results['snmp_lldp']['lldpLocManAddrLen'] = current_val @@ -713,9 +706,6 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.lldpRemTimeMark,), - cmdgen.MibVariable(p.lldpRemLocalPortNum,), - cmdgen.MibVariable(p.lldpRemIndex,), cmdgen.MibVariable(p.lldpRemChassisIdSubtype,), cmdgen.MibVariable(p.lldpRemChassisId,), cmdgen.MibVariable(p.lldpRemPortIdSubtype,), @@ -734,15 +724,6 @@ def main(): for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.lldpRemTimeMark in current_oid: - ifIndex = int(current_oid.split('.')[12]) - results['snmp_interfaces'][ifIndex]['lldpRemTimeMark'] = current_val - if v.lldpRemLocalPortNum in current_oid: - ifIndex = int(current_oid.split('.')[12]) - results['snmp_interfaces'][ifIndex]['lldpRemLocalPortNum'] = current_val - if v.lldpRemIndex in current_oid: - ifIndex = 
int(current_oid.split('.')[12]) - results['snmp_interfaces'][ifIndex]['lldpRemIndex'] = current_val if v.lldpRemChassisIdSubtype in current_oid: ifIndex = int(current_oid.split('.')[12]) results['snmp_interfaces'][ifIndex]['lldpRemChassisIdSubtype'] = current_val @@ -774,8 +755,6 @@ def main(): errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.lldpRemManAddrSubtype,), - cmdgen.MibVariable(p.lldpRemManAddr,), cmdgen.MibVariable(p.lldpRemManAddrIfSubtype,), cmdgen.MibVariable(p.lldpRemManAddrIfId,), cmdgen.MibVariable(p.lldpRemManAddrOID,), @@ -788,14 +767,6 @@ def main(): for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.lldpRemManAddrSubtype in current_oid: - ifIndex = int(current_oid.split('.')[12]) - address = '.'.join(current_oid.split('.')[16:]) - results['snmp_interfaces'][ifIndex]['lldpRemManAddrSubtype'] = current_val - if v.lldpRemManAddr in current_oid: - ifIndex = int(current_oid.split('.')[12]) - address = '.'.join(current_oid.split('.')[16:]) - results['snmp_interfaces'][ifIndex]['lldpRemManAddr'] = current_val if v.lldpRemManAddrIfSubtype in current_oid: ifIndex = int(current_oid.split('.')[12]) address = '.'.join(current_oid.split('.')[16:]) @@ -877,6 +848,26 @@ def main(): psuIndex = int(current_oid.split('.')[-1]) results['snmp_psu'][psuIndex]['operstatus'] = current_val + if not m_args['is_eos']: + errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd( + snmp_auth, + cmdgen.UdpTransportTarget((m_args['host'], 161)), + cmdgen.MibVariable(p.sysTotalMemery,), + cmdgen.MibVariable(p.sysTotalFreeMemery,), + lookupMib=False, lexicographicMode=False + ) + + if errorIndication: + module.fail_json(msg=str(errorIndication) + ' querying system information.') + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if current_oid == v.sysTotalMemery: + 
results['ansible_sysTotalMemery'] = decode_type(module, current_oid, val) + elif current_oid == v.sysTotalFreeMemery: + results['ansible_sysTotalFreeMemery'] = decode_type(module, current_oid, val) + module.exit_json(ansible_facts=results) main() diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py index 67ac2b56a2f..0e26a0e1b7e 100644 --- a/ansible/library/test_facts.py +++ b/ansible/library/test_facts.py @@ -3,9 +3,11 @@ import traceback import ipaddr as ipaddress import csv +import string from operator import itemgetter from itertools import groupby import yaml +from collections import defaultdict DOCUMENTATION = ''' module: test_facts.py @@ -28,7 +30,7 @@ EXAMPLES = ''' Testbed CSV file example: - # conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment + # conf-name,group-name,topo,ptf_image_name,ptf_ip,ptf_ipv6,server,vm_base,dut,comment ptf1-m,ptf1,ptf32,docker-ptf-sai-mlnx,10.255.0.188/24,server_1,,str-msn2700-01,Tests ptf vms-t1,vms1-1,t1,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str-msn2700-01,Tests vms vms-t1-lag,vms1-1,t1-lag,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str-msn2700-01,Tests vms @@ -99,32 +101,39 @@ class ParseTestbedTopoinfo(): Parse the CSV file used to describe whole testbed info Please refer to the example of the CSV file format CSV file first line is title - The topology name in title is using uniq-name | conf-name + The topology name in title is using conf-name ''' def __init__(self, testbed_file): self.testbed_filename = testbed_file - self.testbed_topo = {} + self.testbed_topo = defaultdict() def read_testbed_topo(self): + CSV_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment') with open(self.testbed_filename) as f: - topo = csv.DictReader(f) + topo = csv.DictReader(f, fieldnames=CSV_FIELDS, delimiter=',') + + # Validate all field are in the same order and are present + header = next(topo) + 
for field in CSV_FIELDS: + assert header[field].replace('#', '').strip() == field + for line in topo: - tb_prop = {} - name = '' - for key in line: - if ('uniq-name' in key or 'conf-name' in key) and '#' in line[key]: - ### skip comment line - continue - elif 'uniq-name' in key or 'conf-name' in key: - name = line[key] - elif 'ptf_ip' in key and line[key]: - ptfaddress = ipaddress.IPNetwork(line[key]) - tb_prop['ptf_ip'] = str(ptfaddress.ip) - tb_prop['ptf_netmask'] = str(ptfaddress.netmask) - else: - tb_prop[key] = line[key] - if name: - self.testbed_topo[name] = tb_prop + if line['conf-name'].lstrip().startswith('#'): + ### skip comment line + continue + if line['ptf_ip']: + ptfaddress = ipaddress.IPNetwork(line['ptf_ip']) + line['ptf_ip'] = str(ptfaddress.ip) + line['ptf_netmask'] = str(ptfaddress.netmask) + if line['ptf_ipv6']: + ptfaddress = ipaddress.IPNetwork(line['ptf_ipv6']) + line['ptf_ipv6'] = str(ptfaddress.ip) + line['ptf_netmask_v6'] = str(ptfaddress.netmask) + + line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';') + del line['dut'] + + self.testbed_topo[line['conf-name']] = line return def get_testbed_info(self, testbed_name): diff --git a/ansible/library/testbed_vm_info.py b/ansible/library/testbed_vm_info.py index 9b32a062794..987a3d4c8b4 100644 --- a/ansible/library/testbed_vm_info.py +++ b/ansible/library/testbed_vm_info.py @@ -11,28 +11,32 @@ from collections import defaultdict import re +from ansible.parsing.dataloader import DataLoader +from ansible.inventory.manager import InventoryManager + DOCUMENTATION = ''' module: testbed_vm_info.py Ansible_version_added: 2.0.0.2 -short_description: Gather all related VMs info +short_description: Gather all related VMs info Description: When deploy testbed topology with VM connected to SONiC, gather neighbor VMs info for generating SONiC minigraph file options: base_vm: base vm name defined in testbed.csv for the deployed topology; required: True topo: topology name defined in 
testbed.csv for the deployed topology; required: True + vm_file: the virtual machine file path ; default: 'veos' Ansible_facts: - 'neighbor_eosvm_mgmt': all VM hosts management IPs + 'neighbor_eosvm_mgmt': all VM hosts management IPs 'topoall': topology information ''' EXAMPLES = ''' - name: gather vm information - testbed_vm_info: base_vm='VM0100' topo='t1' + testbed_vm_info: base_vm='VM0100' topo='t1' vm_file='veos' ''' -### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here +### Here are the assumption/expectation of files to gather VM informations, if the file location or name changes, please modify it here TOPO_PATH = 'vars/' VM_INV_FILE = 'veos' @@ -43,12 +47,14 @@ class TestbedVMFacts(): """ - def __init__(self, toponame, vmbase): + def __init__(self, toponame, vmbase, vmfile): CLET_SUFFIX = "-clet" toponame = re.sub(CLET_SUFFIX + "$", "", toponame) self.topofile = TOPO_PATH+'topo_'+toponame +'.yml' self.start_index = int(re.findall('VM(\d+)', vmbase)[0]) self.vmhosts = {} + self.vmfile = vmfile + self.inv_mgr = InventoryManager(loader=DataLoader(), sources=self.vmfile) return @@ -63,21 +69,12 @@ def get_neighbor_eos(self): return eos - def gather_veos_vms(self): - vms = {} - with open(VM_INV_FILE) as f: - lines = f.readlines() - for line in lines: - if 'VM' in line and 'ansible_host' in line: - items = line.split() - vms[items[0]] = items[1].split('=')[1] - return vms - def main(): module = AnsibleModule( argument_spec=dict( base_vm=dict(required=True, type='str'), topo=dict(required=True, type='str'), + vm_file=dict(default=VM_INV_FILE, type='str') ), supports_check_mode=True ) @@ -86,19 +83,18 @@ def main(): if 'ptf' in topo_type: module.exit_json(ansible_facts={'neighbor_eosvm_mgmt': {}}) try: - vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm']) + vmsall = TestbedVMFacts(m_args['topo'], m_args['base_vm'], m_args['vm_file']) neighbor_eos = vmsall.get_neighbor_eos() - 
vm_inv = vmsall.gather_veos_vms() for eos in neighbor_eos: vmname = 'VM'+format(neighbor_eos[eos], '04d') - if vmname in vm_inv: - vmsall.vmhosts[eos] = vm_inv[vmname] + if vmname in vmsall.inv_mgr.hosts: + vmsall.vmhosts[eos] = vmsall.inv_mgr.get_host(vmname).get_vars()['ansible_host'] else: err_msg = "cannot find the vm " + vmname + " in VM inventory file, please make sure you have enough VMs for the topology you are using" module.fail_json(msg=err_msg) module.exit_json(ansible_facts={'neighbor_eosvm_mgmt':vmsall.vmhosts, 'topoall': vmsall.topoall}) except (IOError, OSError): - module.fail_json(msg="Can not find file "+vmsall.topofile+" or "+VM_INV_FILE) + module.fail_json(msg="Can not find file "+vmsall.topofile+" or "+m_args['vm_file']+" or "+VM_INV_FILE) except Exception as e: module.fail_json(msg=traceback.format_exc()) diff --git a/ansible/library/testing_port_ip_facts.py b/ansible/library/testing_port_ip_facts.py index a7c2fa71a9a..553aebf97fb 100644 --- a/ansible/library/testing_port_ip_facts.py +++ b/ansible/library/testing_port_ip_facts.py @@ -33,7 +33,7 @@ dut_switch_ports: "{{ dut_switch_ports }}" minigraph_bgp: "{{ minigraph_bgp }}" minigraph_neighbors: "{{ minigraph_neighbors }}" - connection: local + delegate_to: localhost ''' diff --git a/ansible/library/topo_facts.py b/ansible/library/topo_facts.py index 19afa1801d9..0a2fd10b769 100644 --- a/ansible/library/topo_facts.py +++ b/ansible/library/topo_facts.py @@ -83,6 +83,11 @@ def get_topo_config(self, topo_name): else: vm_topo_config['disabled_host_interfaces'] = [] + if 'DUT' in topo_definition['topology']: + vm_topo_config['DUT'] = topo_definition['topology']['DUT'] + else: + vm_topo_config['DUT'] = {} + self.vm_topo_config = vm_topo_config return vm_topo_config diff --git a/ansible/linkstate/down.yml b/ansible/linkstate/down.yml index 8efbaf0e133..3ce1e47ab60 100644 --- a/ansible/linkstate/down.yml +++ b/ansible/linkstate/down.yml @@ -44,6 +44,9 @@ - hosts: eos gather_facts: no tasks: + - 
include_vars: ../group_vars/all/creds.yml + - name: Set ansible login user name and password + set_fact: ansible_user="root" ansible_password={{ eos_root_password }} - name: Check list of processes command: ps ax changed_when: False diff --git a/ansible/linkstate/up.yml b/ansible/linkstate/up.yml index 5b2679f3917..eb4d7cde31b 100644 --- a/ansible/linkstate/up.yml +++ b/ansible/linkstate/up.yml @@ -3,6 +3,9 @@ - hosts: eos gather_facts: no tasks: + - include_vars: ../group_vars/all/creds.yml + - name: Set ansible login user name and password + set_fact: ansible_user="root" ansible_password={{ eos_root_password }} - name: Check list of processes command: ps ax changed_when: False @@ -40,10 +43,10 @@ src: "{{ item }}" dest: /root with_items: - - ../files/lab_connection_graph.xml - - ../veos - - scripts/ptf_proxy.py - - ../vars/topo_{{ topo }}.yml + - "../files/lab_connection_graph.xml" + - "../veos" + - "scripts/ptf_proxy.py" + - "../vars/topo_{{ topo }}.yml" ignore_errors: yes # either sonic_str_*.csv or sonic_lab_*.csv exists - name: Rename topo to common filename command: mv topo_{{ topo }}.yml topo.yaml diff --git a/ansible/plugins/action/apswitch.py b/ansible/plugins/action/apswitch.py index 9ee15a77272..ae39718afb5 100644 --- a/ansible/plugins/action/apswitch.py +++ b/ansible/plugins/action/apswitch.py @@ -2,8 +2,8 @@ __metaclass__ = type from ansible.plugins.action import ActionBase -from ansible.utils.boolean import boolean -from ansible.utils.unicode import to_unicode +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_text import ast @@ -44,7 +44,7 @@ def run(self, tmp=None, task_vars=None): _template = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', _template) f = open(_template, 'r') - template_data = to_unicode(f.read()) + template_data = to_text(f.read()) f.close() _template = self._templar.template(template_data) diff --git a/ansible/plugins/action/onie.py 
b/ansible/plugins/action/onie.py index b969b3cf1cb..16dd47edd7f 100644 --- a/ansible/plugins/action/onie.py +++ b/ansible/plugins/action/onie.py @@ -2,8 +2,8 @@ __metaclass__ = type from ansible.plugins.action import ActionBase -from ansible.utils.boolean import boolean -from ansible.utils.unicode import to_unicode +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.module_utils._text import to_text import ast @@ -34,7 +34,7 @@ def run(self, tmp=None, task_vars=None): _template = self._loader.path_dwim_relative(self._loader.get_basedir(), 'templates', _template) f = open(_template, 'r') - template_data = to_unicode(f.read()) + template_data = to_text(f.read()) f.close() _template = self._templar.template(template_data) diff --git a/ansible/plugins/callback/yaml.py b/ansible/plugins/callback/yaml.py index bcc94cdf8de..c50ff4b29d8 100644 --- a/ansible/plugins/callback/yaml.py +++ b/ansible/plugins/callback/yaml.py @@ -24,7 +24,7 @@ from ansible.plugins.callback.default import CallbackModule as Default # simple workaroud for using yaml callback plugin -from ansible.vars.unsafe_proxy import AnsibleUnsafeText +from ansible.utils.unsafe_proxy import AnsibleUnsafeText represent_unicode = yaml.representer.SafeRepresenter.represent_unicode from ansible.parsing.yaml.dumper import AnsibleDumper AnsibleDumper.add_representer( diff --git a/ansible/plugins/connection/switch.py b/ansible/plugins/connection/switch.py index e09408e8761..ba6ee521df4 100644 --- a/ansible/plugins/connection/switch.py +++ b/ansible/plugins/connection/switch.py @@ -11,6 +11,7 @@ import fcntl import pwd import time +import string from ansible import constants as C from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound @@ -53,35 +54,42 @@ def _build_command(self): self._ssh_command += ['-o', 'GSSAPIAuthentication=no', '-o', 'PubkeyAuthentication=no'] - self._ssh_command += ['-o', 'ConnectTimeout=60'] + self._ssh_command += ['-o', 'ConnectTimeout=' + 
str(self.timeout)] + + def _remove_unprintable(self, buff): + return filter(lambda x: x in string.printable, buff) def _spawn_connect(self): last_user = None client = None attempt = 0 + max_retries = 3 self._display.vvv("%s" % self.login) while attempt < len(self.login['user']): (user, login_passwd) = self.login['user'][attempt] if user != last_user: - if client: - client.close() cmd = self._ssh_command + ['-l', user, self.host] - self._display.vvv("SSH: EXEC {0}".format(' '.join(cmd)), - host=self.host) last_user = user - client = pexpect.spawn(' '.join(cmd), env={'TERM': 'dumb'}, timeout=60) - i = client.expect(['[Pp]assword:', pexpect.EOF]) - if i == 1: - self._display.vvv("Server closed the connection, retry in %d seconds" % self.connection_retry_interval, host=self.host) - time.sleep(self.connection_retry_interval) - last_user = None - continue + for conn_attempt in range(max_retries): + if client: + client.close() + self._display.vvv("SSH: EXEC {0}".format(' '.join(cmd)), host=self.host) + client = pexpect.spawn(' '.join(cmd), env={'TERM': 'dumb'}, timeout=self.timeout) + i = client.expect(['[Pp]assword:', pexpect.EOF, pexpect.TIMEOUT]) + if i == 0: + break + else: + self._display.vvv("Establish connection to server failed", host=self.host) + if conn_attempt < max_retries - 1: # To avoid unnecessary sleep if max retry reached + self._display.vvv("Retry in %d seconds" % self.connection_retry_interval, host=self.host) + time.sleep(self.connection_retry_interval) + else: + raise AnsibleError("Establish connection to server failed after tried %d times." % max_retries) self._display.vvv("Try password %s..." 
% login_passwd[0:4], host=self.host) client.sendline(login_passwd) - client.timeout = 60 i = client.expect(['>', '#', '[Pp]assword:', pexpect.EOF]) if i < 2: break @@ -98,15 +106,23 @@ def _spawn_connect(self): # determine the sku client.sendline('show version') - client.expect(['#', '>']) - if 'Arista' in client.before: - self.sku = 'eos' - elif 'Cisco' in client.before: - self.sku = 'nxos' - if 'MLNX-OS' in client.before: - self.sku = 'mlnx_os' - if 'Dell' in client.before: - self.sku = 'dell' + while True: + client.expect(['#', '>']) + # It may be that right after fanout starts + # the OS on fanout sends few promts which may not + # include 'show version' output + if 'show version' in client.before: + if 'Arista' in client.before: + self.sku = 'eos' + elif 'Cisco' in client.before: + self.sku = 'nxos' + elif ('MLNX-OS' in client.before) or ('Onyx' in client.before): + self.sku = 'mlnx_os' + elif 'Dell' in client.before: + self.sku = 'dell' + else: + raise AnsibleError("Unable to determine fanout SKU") + break if self.sku == 'mlnx_os': self.hname = ' '.join(self.before_backup[-3:]) @@ -184,14 +200,14 @@ def exec_command(self, *args, **kwargs): self.reboot = kwargs['reboot'] if kwargs['root']: self.login['user'] = 'root' + if kwargs['timeout']: + self.timeout = int(kwargs['timeout']) + else: + self.timeout = 60 self._build_command() client = self._spawn_connect() - # Set command timeout after connection is spawned - if kwargs['timeout']: - client.timeout = int(kwargs['timeout']) - # "%s>": non privileged prompt # "%s(\([a-z\-]+\))?#": privileged prompt including configure mode # Prompt includes Login, Password, and yes/no for "start shell" case in Dell FTOS (launch bash shell) @@ -219,8 +235,9 @@ def exec_command(self, *args, **kwargs): self._display.vvv('> %s' % (cmd), host=self.host) client.sendline(cmd) client.expect(prompts) - stdout += client.before - self._display.vvv('< %s' % (client.before), host=self.host) + before = 
self._remove_unprintable(client.before) + stdout += before + self._display.vvv('< %s' % (before), host=self.host) if self.reboot: if not self.enable: @@ -230,14 +247,14 @@ def exec_command(self, *args, **kwargs): i = client.expect(['\(y\/n\)\??\s*\[n\]', 'Proceed with reload\? \[confirm\]', 'System configuration has been modified. Save\? \[yes\/no\/cancel\/diff\]:']) if i == 2: # EOS behavior - stdout += client.before + stdout += self._remove_unprintable(client.before) client.sendline('n') i = client.expect('Proceed with reload\? \[confirm\]') - stdout += client.before + stdout += self._remove_unprintable(client.before) client.sendline('y') # The system is going down for reboot NOW: EOS i = client.expect(['>', '#', 'The system is going down for reboot NOW', pexpect.TIMEOUT, pexpect.EOF]) - stdout += client.before + stdout += self._remove_unprintable(client.before) if i < 2: raise AnsibleError("Box failed to reboot. stdout = %s" % stdout) self._display.vvv("Box rebooted", host=self.host) diff --git a/ansible/plugins/filter/filters.py b/ansible/plugins/filter/filters.py index 4bf39421c20..b0c20eec45a 100644 --- a/ansible/plugins/filter/filters.py +++ b/ansible/plugins/filter/filters.py @@ -8,6 +8,7 @@ def filters(self): 'filter_by_prefix': filter_by_prefix, 'filter_vm_targets': filter_vm_targets, 'extract_hostname': extract_hostname, + 'first_n_elements': first_n_elements, } def extract_by_prefix(values, prefix): @@ -55,6 +56,26 @@ def filter_by_prefix(values, prefix): return filter(lambda x: x.startswith(prefix), values) +def first_n_elements(values, num): + """ + This function return first n elements of a list. 
If the list length is less than n, then return the whole list + """ + if values is None: + raise errors.AnsibleFilterError('Values is not provided') + + if num is None: + raise errors.AnsibleFilterError('num is not provided') + + if not isinstance(values, list): + raise errors.AnsibleFilterError('Wrong type for values') + + if not isinstance(num, str) and not isinstance(num, unicode): + raise errors.AnsibleFilterError("Wrong type for the num {}".format(type(num))) + + if len(values) <= int(num): + return values + + return values[0:int(num)] def filter_vm_targets(values, topology, vm_base): """ @@ -85,6 +106,8 @@ def filter_vm_targets(values, topology, vm_base): result = [] base = values.index(vm_base) for hostname, attr in topology.iteritems(): + if base + attr['vm_offset'] >= len(values): + continue result.append(values[base + attr['vm_offset']]) return result @@ -124,6 +147,8 @@ def extract_hostname(values, topology, vm_base, inventory_hostname): hash = {} base = values.index(vm_base) for hostname, attr in topology.iteritems(): + if base + attr['vm_offset'] >= len(values): + continue if inventory_hostname == values[base + attr['vm_offset']]: return hostname diff --git a/ansible/roles/eos/tasks/main.yml b/ansible/roles/eos/tasks/main.yml index e3c64d6f805..9e1b5f3611e 100644 --- a/ansible/roles/eos/tasks/main.yml +++ b/ansible/roles/eos/tasks/main.yml @@ -47,7 +47,7 @@ - name: Expand {{ hostname }} properties into props set_fact: props="{{ configuration_properties[item] | combine(props | default({})) }}" - with_items: properties_list + with_items: "{{ properties_list }}" when: hostname in configuration and configuration_properties[item] is defined - name: build a startup config diff --git a/ansible/roles/eos/templates/t0-slx-leaf.j2 b/ansible/roles/eos/templates/t0-slx-leaf.j2 new file mode 100644 index 00000000000..277163ccc62 --- /dev/null +++ b/ansible/roles/eos/templates/t0-slx-leaf.j2 @@ -0,0 +1,189 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip 
= ansible_host %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +{% if vm_mgmt_gw is defined %} +ip route vrf MGMT 0.0.0.0/0 {{ vm_mgmt_gw }} +{% else %} +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +{% endif %} +! +route-map DEFAULT_ROUTES permit +! +{# #} +{# NOTE: Using large enough values (e.g., podset_number = 200, #} +{# us to overflow the 192.168.0.0/16 private address space here. #} +{# This should be fine for internal use, but may pose an issue if used otherwise #} +{# #} +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) + + (subnet * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - ((props.tor_subnet_size | log(2))) | int) %} +ip route {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} {{ props.nhipv4 }} +ipv6 route {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/64 {{ props.nhipv6 }} +{% endif %} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{# Skip tor 0 podset 0 #} +{% if podset != 0 or tor != 0 %} +{% set suffix = ( (podset * props.tor_number * props.max_tor_subnet_number * props.tor_subnet_size) + + (tor * props.max_tor_subnet_number * props.tor_subnet_size) ) %} +{% set octet2 = (168 + (suffix // (256 ** 2))) %} +{% set octet1 = (192 + (octet2 // 256)) %} +{% set octet2 = (octet2 % 256) %} +{% set octet3 = ((suffix // 256) % 256) %} +{% set octet4 = (suffix % 256) %} +{% set prefixlen_v4 = (32 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +{% set prefixlen_v6 = (64 - (((props.max_tor_subnet_number * props.tor_subnet_size) | log(2)) | int) ) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit {{ octet1 }}.{{ octet2 }}.{{ octet3 }}.{{ octet4 }}/{{ prefixlen_v4 }} ge {{ prefixlen_v4 }} +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit {{ '20%02x' % octet1 }}:{{ '%02X%02X' % (octet2, octet3) }}:0:{{ '%02X' % octet4 }}::/{{ prefixlen_v6 }} ge {{ prefixlen_v6 }} +exit +{% endif %} +{% endfor %} +{% endfor %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if name.startswith('Port-Channel') %} + port-channel min-links 1 +{% endif %} +{% if iface['lacp'] is defined %} + channel-group {{ iface['lacp'] }} mode active + lacp rate normal +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! 
+interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} +{% if podset == 0 %} + set as-path prepend {{ torasn }} +{% else %} + set as-path prepend {{ props.spine_asn }} {{ leafasn }} {{ torasn }} +{% endif %} +! +{% endfor %} +{% endif %} +{% endfor %} +! +router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! +{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! 
+{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} + redistribute static route-map PREPENDAS +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end +s + 136,1 66% + 72,1 23% + diff --git a/ansible/roles/eos/templates/t1-slx-spine.j2 b/ansible/roles/eos/templates/t1-slx-spine.j2 new file mode 100644 index 00000000000..57eca1bd1d5 --- /dev/null +++ b/ansible/roles/eos/templates/t1-slx-spine.j2 @@ -0,0 +1,138 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +! +route-map DEFAULT_ROUTES permit +! +{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +{% for subnet in range(0, props.tor_subnet_number) %} +ip route 192.168.{{ podset }}.{{ tor * 16 + subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16 + subnet)}}::/64 {{ props.nhipv6 }} +{% endfor %} +{% endfor %} +{% endfor %} +! 
+{% for podset in range(0, props.podset_number) %} +{% for tor in range(0, props.tor_number) %} +ip prefix-list test_ipv4_{{ podset}}_{{ tor }} seq 10 permit 192.168.{{ podset }}.{{ tor * 16 }}/28 ge 28 +ipv6 prefix-list test_ipv6_{{ podset}}_{{ tor }} + seq 10 permit 20C0:A8{{ '%02X' % podset }}:0:{{ '%02X' % (tor * 16) }}::/60 ge 60 +exit +{% endfor %} +{% endfor %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! +interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% for podset in range(0, props.podset_number) %} +{% if range(0, 1000)|random() >= props.failure_rate %} +{% for tor in range(0, props.tor_number) %} +{% set leafasn = props.leaf_asn_start + podset %} +{% set torasn = props.tor_asn_start + tor %} +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) }} + match ip address prefix-list test_ipv4_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +route-map PREPENDAS permit {{ 2 * (podset * props.tor_number + tor + 1) + 1 }} + match ipv6 address prefix-list test_ipv6_{{ podset }}_{{ tor }} + set as-path prepend {{ leafasn }} {{ torasn }} +! +{% endfor %} +{% endif %} +{% endfor %} +! 
+router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! +{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} + neighbor {{ remote_ip }} default-originate route-map DEFAULT_ROUTES +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! +{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} + redistribute static route-map PREPENDAS +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end + diff --git a/ansible/roles/eos/templates/t1-slx-tor.j2 b/ansible/roles/eos/templates/t1-slx-tor.j2 new file mode 100644 index 00000000000..2a515847655 --- /dev/null +++ b/ansible/roles/eos/templates/t1-slx-tor.j2 @@ -0,0 +1,132 @@ +{% set host = configuration[hostname] %} +{% set mgmt_ip = ansible_host %} +{% set tornum = host['tornum'] %} +no schedule tech-support +! +hostname {{ hostname }} +! +vrf definition MGMT + rd 1:1 +! +spanning-tree mode mstp +! +aaa root secret 0 123456 +! +username admin privilege 15 role network-admin secret 0 123456 +! +clock timezone UTC +! +lldp run +lldp management-address Management1 +lldp management-address vrf MGMT +! +snmp-server community {{ snmp_rocommunity }} ro +snmp-server vrf MGMT +! +ip routing +ip routing vrf MGMT +ipv6 unicast-routing +! +ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }} +! 
+{% for subnet in range(0, props.tor_subnet_number) %} +ip route 172.16.{{ tornum }}.{{ subnet }}/32 {{ props.nhipv4 }} +ipv6 route 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 {{ props.nhipv6 }} +{% endfor %} +{% if 'vips' in host %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip route {{ subnet }} {{ props.nhipv4 }} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +ip prefix-list test_vip_{{ index }} seq 1{{ index }} permit {{ subnet }} +{% set index = index + 1 %} +{% endfor %} +{% set index = 1 %} +{% for subnet in host['vips']['ipv4']['prefixes'] %} +route-map PREPENDAS permit 2{{ index }} + match ip address prefix-list test_vip_{{ index }} + set as-path prepend {{ host['vips']['ipv4']['asn'] }} +{% set index = index + 1 %} +{% endfor %} +{% endif %} +! +interface Management 1 + description TO LAB MGMT SWITCH + vrf forwarding MGMT + ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }} + no shutdown +! +{% for name, iface in host['interfaces'].items() %} +interface {{ name }} +{% if name.startswith('Loopback') %} + description LOOPBACK +{% else %} + no switchport +{% endif %} +{% if iface['ipv4'] is defined %} + ip address {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ iface['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +{% endfor %} +! +interface {{ bp_ifname }} + description backplane + no switchport +{% if host['bp_interface']['ipv4'] is defined %} + ip address {{ host['bp_interface']['ipv4'] }} +{% endif %} +{% if host['bp_interface']['ipv6'] is defined %} + ipv6 enable + ipv6 address {{ host['bp_interface']['ipv6'] }} + ipv6 nd ra suppress +{% endif %} + no shutdown +! +router bgp {{ host['bgp']['asn'] }} + router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }} + ! + graceful-restart restart-time {{ bgp_gr_timer }} + graceful-restart + ! 
+{% for asn, remote_ips in host['bgp']['peers'].items() %} +{% for remote_ip in remote_ips %} + neighbor {{ remote_ip }} remote-as {{ asn }} + neighbor {{ remote_ip }} description {{ asn }} +{% if remote_ip | ipv6 %} + address-family ipv6 + neighbor {{ remote_ip }} activate + exit +{% endif %} +{% endfor %} +{% endfor %} + ! +{% if 'vips' in host %} +redistribute static route-map PREPENDAS +{% endif %} +{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %} +{% if iface['ipv4'] is defined %} + network {{ iface['ipv4'] }} +{% endif %} +{% if iface['ipv6'] is defined %} + network {{ iface['ipv6'] }} +{% endif %} +{% endfor %} +{% for subnet in range(0, props.tor_subnet_number) %} + network 172.16.{{ tornum }}.{{ subnet }}/32 + network 20AC:10{{ '%02X' % tornum }}:0:{{ '%02X' % subnet }}::/64 +{% endfor %} +! +management api http-commands + no protocol https + protocol http + no shutdown +! +end + diff --git a/ansible/roles/fanout/tasks/main.yml b/ansible/roles/fanout/tasks/main.yml index 3ed4aae8a7e..6110d331001 100644 --- a/ansible/roles/fanout/tasks/main.yml +++ b/ansible/roles/fanout/tasks/main.yml @@ -30,7 +30,7 @@ leaf_name: "{{ inventory_hostname }}" leaf: "{{ ansible_host }}" - - include: rootfanout_connect.yml - deploy_leaf=true - when: sw_type == 'FanoutLeaf' +# - include: rootfanout_connect.yml +# deploy_leaf=true +# when: sw_type == 'FanoutLeaf' diff --git a/ansible/roles/test/files/acstests/acltb_test.py b/ansible/roles/test/files/acstests/acltb_test.py index bf5d1ecd3e5..ae4329a8449 100644 --- a/ansible/roles/test/files/acstests/acltb_test.py +++ b/ansible/roles/test/files/acstests/acltb_test.py @@ -1,76 +1,84 @@ ''' -Description: This file contains the ACL test for SONiC testbed +Description: - Implemented according to the https://github.com/Azure/SONiC/wiki/ACL-test-plan + This file contains the ACL test for SONiC testbed + Implemented according to the 
https://github.com/Azure/SONiC/wiki/ACL-test-plan -Usage: Examples of how to use: - ptf --test-dir acstests acltb_test.AclTest --platform remote -t 'router_mac="00:02:03:04:05:00";verbose=True;route_info="/tmp/route_info.txt"' +Usage: + Examples of how to use: + + ptf --test-dir acstests acltb_test.AclTest --platform-dir ptftests --platform remote + -t "router_mac='e4:1d:2d:f7:d5:40';testbed_type='t1-lag'; + tor_ports='27,22,29,25,20,28,26,21,24,31,23,30,19,16,18,17'; + spine_ports='7,2,11,0,1,6,13,12,14,10,15,8,5,4,9,3'; + dst_ip_tor='172.16.1.0';dst_ip_tor_forwarded='172.16.2.0';dst_ip_tor_blocked='172.16.3.0'; + dst_ip_spine='192.168.0.0';dst_ip_spine_forwarded='192.168.0.16';dst_ip_spine_blocked='192.168.0.17'" ''' +from __future__ import print_function -#--------------------------------------------------------------------- -# Global imports -#--------------------------------------------------------------------- -import random -import time import logging +import json + +import ptf import ptf.packet as scapy -import socket -import ptf.dataplane as dataplane +import ptf.testutils as testutils -from ptf.testutils import * +from ptf.testutils import simple_tcp_packet +from ptf.testutils import simple_udp_packet +from ptf.testutils import simple_icmp_packet +from ptf.testutils import dp_poll +from ptf.testutils import send_packet from ptf.mask import Mask -import ipaddress - -import os -import logging -import unittest - -import ptf from ptf.base_tests import BaseTest -from ptf import config -import ptf.dataplane as dataplane -import ptf.testutils as testutils -import pprint class AclTest(BaseTest): ''' @summary: ACL tests on testbed topo: t1 ''' - #--------------------------------------------------------------------- - # Class variables - #--------------------------------------------------------------------- - PORT_COUNT = 31 # temporary exclude the last port - def __init__(self): ''' @summary: constructor ''' BaseTest.__init__(self) 
self.test_params = testutils.test_params_get() - #--------------------------------------------------------------------- def setUp(self): ''' @summary: Setup for the test ''' - self.dataplane = ptf.dataplane_instance self.router_mac = self.test_params['router_mac'] self.testbed_type = self.test_params['testbed_type'] + self.tor_ports = [int(p) for p in self.test_params['tor_ports'].split(',')] + self.tor_ports.sort() + self.spine_ports = [int(p) for p in self.test_params['spine_ports'].split(',')] + self.spine_ports.sort() + self.dst_ip_tor = self.test_params['dst_ip_tor'] + self.dst_ip_tor_forwarded = self.test_params['dst_ip_tor_forwarded'] + self.dst_ip_tor_blocked = self.test_params['dst_ip_tor_blocked'] + self.dst_ip_spine = self.test_params['dst_ip_spine'] + self.dst_ip_spine_forwarded = self.test_params['dst_ip_spine_forwarded'] + self.dst_ip_spine_blocked = self.test_params['dst_ip_spine_blocked'] + self.current_src_port_idx = 0 # An index for choosing a port for injecting packet + self.test_results = [] + + def _select_src_port(self, src_ports): + """ + @summary: Choose a source port from list source ports in a round robin way + @return: Source port number picked from list of source ports + """ + if len(src_ports) == 0: + return None - #--------------------------------------------------------------------- + self.current_src_port_idx = self.current_src_port_idx % len(src_ports) # In case the index is out of range - ''' - For diagnostic purposes only - ''' - def print_route_info(self): - pprint.pprint(self.route_info) - return - #--------------------------------------------------------------------- + port = src_ports[self.current_src_port_idx] + self.current_src_port_idx = (self.current_src_port_idx + 1) % len(src_ports) + return port - def verify_packet_any_port(self, pkt, ports=[], device_number=0): + def verify_packet_any_port(self, pkt, ports, device_number=0): """ @summary: Check that the packet is received on _any_ of the specified ports belonging to 
the given device (default device_number is 0). @@ -80,8 +88,10 @@ def verify_packet_any_port(self, pkt, ports=[], device_number=0): Also verifies that the packet is or received on any other ports for this device, and that no other packets are received on the device (unless --relax is in effect). + @param pkt : packet to verify @param ports : list of ports + @param device_number: device number, default is 0 @return: index of the port on which the packet is received and the packet. """ @@ -93,225 +103,314 @@ def verify_packet_any_port(self, pkt, ports=[], device_number=0): match_index = ports.index(rcv_port) received = True - return (match_index, rcv_pkt, received) - #--------------------------------------------------------------------- + return match_index, rcv_pkt, received - def runSendReceiveTest(self, i, pkt2send, src_port , pkt2recv, dst_ports, expect): + def runSendReceiveTest(self, pkt2send, src_ports, pkt2recv, dst_ports, pkt_expected): """ @summary Send packet and verify it is received/not received on the expected ports + @param pkt2send: The packet that will be injected into src_port + @param src_ports: The port into which the pkt2send will be injected + @param pkt2recv: The packet that will be received on one of the dst_ports + @param dst_ports: The ports on which the pkt2recv may be received + @param pkt_expected: Indicated whether it is expected to receive the pkt2recv on one of the dst_ports """ masked2recv = Mask(pkt2recv) masked2recv.set_do_not_care_scapy(scapy.Ether, "dst") masked2recv.set_do_not_care_scapy(scapy.Ether, "src") - send_packet(self, src_port, pkt2send) - (index, rcv_pkt, received) = self.verify_packet_any_port(masked2recv, dst_ports) - self.tests_total += 1 + # Choose a source port from list of source ports + src_port = self._select_src_port(src_ports) - passed = received == expect - print "Test #" + str(i) + " %s" % ("PASSED" if passed else "FAILED") - self.tests_passed += 1 if passed else 0 + # Send the packet and poll on destination 
ports + send_packet(self, src_port, pkt2send) + logging.debug("Sent packet: " + pkt2send.summary()) + (index, rcv_pkt, received) = self.verify_packet_any_port(masked2recv, dst_ports) + logging.debug('index=%s, received=%s' % (str(index), str(received))) + if received: + logging.debug("Received packet: " + scapy.Ether(rcv_pkt).summary()) + + if pkt_expected: + logging.debug('Expected packet on dst_ports') + passed = True if received else False + logging.debug('Received: ' + str(received)) + else: + logging.debug('No packet expected on dst_ports') + passed = False if received else True + logging.debug('Received: ' + str(received)) + logging.debug('Passed: ' + str(passed)) return passed - #--------------------------------------------------------------------- - def runAclTests(self, dst_ip, dst_ip_blocked, src_port, dst_ports): + def runAclTestCase(self, test_name, *args, **kwargs): + """ + @summary: Wrapper for running ACL test cases + @param test_name: Friendly name of the test case + """ + logging.info('Run test case: ' + test_name) + res = self.runSendReceiveTest(*args, **kwargs) + logging.info('Result of "%s": %s' % (test_name, "PASSED" if res else "FAILED")) + self.test_results.append({"result": res, "test_name": test_name}) + + def runAclTests(self, dst_ip, dst_ip_forwarded, dst_ip_blocked, src_ports, dst_ports, direction): """ @summary: Crete and send packet to verify each ACL rule - @return: Number of tests passed """ - self.tests_passed = 0 - self.tests_total = 0 - - print "\nPort to sent packets to: %d" % src_port - print "Destination IP: %s" % dst_ip - print "Ports to expect packet from: ", - pprint.pprint(dst_ports) - print "Dst IP expected to be blocked: %s " % dst_ip_blocked - - pkt0 = simple_tcp_packet( - eth_dst = self.router_mac, - eth_src = self.dataplane.get_mac(0, 0), - ip_src = "10.0.0.1", - ip_dst = dst_ip, - tcp_sport = 0x4321, - tcp_dport = 0x51, - ip_ttl = 64 - ) - - exp_pkt0 = simple_tcp_packet( - eth_dst = self.dataplane.get_mac(0, 0), - 
eth_src = self.router_mac, - ip_src = "10.0.0.1", - ip_dst = dst_ip, - tcp_sport = 0x4321, - tcp_dport = 0x51, - ip_ttl = 63 - ) - - print "" - # Test #0 - unmatched packet - dropped - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - self.runSendReceiveTest(0, pkt, src_port, exp_pkt, dst_ports, 0) - - # Test #1 - source IP match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['IP'].src = "10.0.0.2" - exp_pkt['IP'].src = "10.0.0.2" - self.runSendReceiveTest(1, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #2 - destination IP match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['IP'].dst = dst_ip_blocked - exp_pkt['IP'].dst = dst_ip_blocked - self.runSendReceiveTest(2, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #3 - L4 source port match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['TCP'].sport = 0x1235 - exp_pkt['TCP'].sport = 0x1235 - self.runSendReceiveTest(3, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #4 - L4 destination port match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['TCP'].dport = 0x1235 - exp_pkt['TCP'].dport = 0x1235 - self.runSendReceiveTest(4, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #5 - IP protocol match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() + direction = ", " + direction + + print("\nPort to sent packets to:") + print(src_ports) + print("Destination IP: %s" % dst_ip) + print("Ports to expect packet from: ") + print(dst_ports) + print("Dst IP expected to be blocked: %s " % dst_ip_blocked) + + tcp_pkt0 = simple_tcp_packet( + eth_dst=self.router_mac, + eth_src=self.dataplane.get_mac(0, 0), + ip_src="20.0.0.1", + ip_dst=dst_ip, + tcp_sport=0x4321, + tcp_dport=0x51, + ip_ttl=64 + ) + + tcp_exp_pkt0 = simple_tcp_packet( + eth_dst=self.dataplane.get_mac(0, 0), + eth_src=self.router_mac, + ip_src="20.0.0.1", + ip_dst=dst_ip, + tcp_sport=0x4321, + tcp_dport=0x51, + ip_ttl=63 + ) + + test_name = 'Test #0 - unmatched packet - dropped' + 
direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = 'Test #1 - source IP match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.2" + exp_pkt['IP'].src = "20.0.0.2" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #2 - destination IP match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].dst = dst_ip_forwarded + exp_pkt['IP'].dst = dst_ip_forwarded + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #3 - L4 source port match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].sport = 0x120D + exp_pkt['TCP'].sport = 0x120D + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #4 - L4 destination port match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].dport = 0x1217 + exp_pkt['TCP'].dport = 0x1217 + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #5 - IP protocol match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() pkt['IP'].proto = 0x7E exp_pkt['IP'].proto = 0x7E - self.runSendReceiveTest(5, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #6 - TCP flags match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['TCP'].flags = 0x12 - exp_pkt['TCP'].flags = 0x12 - self.runSendReceiveTest(6, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #7 - source port range match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #6 - TCP flags match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].flags 
= 0x1B + exp_pkt['TCP'].flags = 0x1B + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #7 - source port range match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() pkt['TCP'].sport = 0x123A exp_pkt['TCP'].sport = 0x123A - self.runSendReceiveTest(7, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #8 - destination port range match - forwarded - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['TCP'].dport = 0x123A - exp_pkt['TCP'].dport = 0x123A - self.runSendReceiveTest(8, pkt, src_port, exp_pkt, dst_ports, 1) - - # Test #9 - rules priority - dropped - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['IP'].src = "10.0.0.3" - exp_pkt['IP'].src = "10.0.0.3" - self.runSendReceiveTest(9, pkt, src_port, exp_pkt, dst_ports, 0) - - # Create a ICMP packet - pkt0 = simple_icmp_packet( - eth_dst = self.router_mac, - eth_src = self.dataplane.get_mac(0, 0), - ip_src = "10.0.0.1", - ip_dst = dst_ip, - icmp_type=8, - icmp_code=0, - ip_ttl = 64 - ) - - exp_pkt0 = simple_icmp_packet( - eth_dst = self.dataplane.get_mac(0, 0), - eth_src = self.router_mac, - ip_src = "10.0.0.1", - ip_dst = dst_ip, - icmp_type=8, - icmp_code=0, - ip_ttl = 63 - ) - - # Test #10 - ICMP source IP match - forwarded - # IP_PROTOCOL = 0x1 - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['IP'].src = "10.0.0.2" - exp_pkt['IP'].src = "10.0.0.2" - self.runSendReceiveTest(10, pkt, src_port, exp_pkt, dst_ports, 1) + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #8 - destination port range match - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].dport = 0x123B + exp_pkt['TCP'].dport = 0x123B + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = '# Test #9 - rules priority - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.3" + 
exp_pkt['IP'].src = "20.0.0.3" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + # Create a ICMP packet + icmp_pkt0 = simple_icmp_packet( + eth_dst=self.router_mac, + eth_src=self.dataplane.get_mac(0, 0), + ip_src="20.0.0.1", + ip_dst=dst_ip, + icmp_type=8, + icmp_code=0, + ip_ttl=64 + ) + + icmp_exp_pkt0 = simple_icmp_packet( + eth_dst=self.dataplane.get_mac(0, 0), + eth_src=self.router_mac, + ip_src="20.0.0.1", + ip_dst=dst_ip, + icmp_type=8, + icmp_code=0, + ip_ttl=63 + ) + + test_name = 'Test #10 - ICMP source IP match, IP_PROTOCOL=0x1 - forwarded' + direction + pkt = icmp_pkt0.copy() + exp_pkt = icmp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.4" + exp_pkt['IP'].src = "20.0.0.4" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) # Create a UDP packet - pkt0 = simple_udp_packet( - eth_dst = self.router_mac, - eth_src = self.dataplane.get_mac(0, 0), - ip_src = "10.0.0.1", - ip_dst = dst_ip, - udp_sport = 1234, - udp_dport = 80, - ip_ttl = 64 - ) - - exp_pkt0 = simple_udp_packet( - eth_dst = self.dataplane.get_mac(0, 0), - eth_src = self.router_mac, - ip_src = "10.0.0.1", - ip_dst = dst_ip, - udp_sport = 1234, - udp_dport = 80, - ip_ttl = 63 - ) - - # Test #11 - UDP source IP match - forwarded - # IP_PROTOCOL = 0x11 - pkt = pkt0.copy() - exp_pkt = exp_pkt0.copy() - pkt['IP'].src = "10.0.0.2" - exp_pkt['IP'].src = "10.0.0.2" - self.runSendReceiveTest(11, pkt, src_port, exp_pkt, dst_ports, 1) - - return self.tests_passed, self.tests_total - - #--------------------------------------------------------------------- + udp_pkt0 = simple_udp_packet( + eth_dst=self.router_mac, + eth_src=self.dataplane.get_mac(0, 0), + ip_src="20.0.0.1", + ip_dst=dst_ip, + udp_sport=1234, + udp_dport=80, + ip_ttl=64 + ) + + udp_exp_pkt0 = simple_udp_packet( + eth_dst=self.dataplane.get_mac(0, 0), + eth_src=self.router_mac, + ip_src="20.0.0.1", + ip_dst=dst_ip, + udp_sport=1234, + udp_dport=80, + ip_ttl=63 + ) + + test_name = 'Test #11 - 
UDP source IP match, IP_PROTOCOL=0x11 - forwarded' + direction + pkt = udp_pkt0.copy() + exp_pkt = udp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.4" + exp_pkt['IP'].src = "20.0.0.4" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + ########################################################################### + + test_name = 'Test #12 - source IP match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.6" + exp_pkt['IP'].src = "20.0.0.6" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #13 - destination IP match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].dst = dst_ip_blocked + exp_pkt['IP'].dst = dst_ip_blocked + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #14 - L4 source port match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].sport = 0x1271 + exp_pkt['TCP'].sport = 0x1271 + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #15 - L4 destination port match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].dport = 0x127B + exp_pkt['TCP'].dport = 0x127B + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #16 - IP protocol match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].proto = 0x7F + exp_pkt['IP'].proto = 0x7F + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #17 - TCP flags match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].flags = 0x24 + exp_pkt['TCP'].flags = 0x24 + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #18 - source port range match - dropped' + 
direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].sport = 0x129E + exp_pkt['TCP'].sport = 0x129E + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #19 - destination port range match - dropped' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['TCP'].dport = 0x129F + exp_pkt['TCP'].dport = 0x129F + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = '# Test #20 - rules priority - forwarded' + direction + pkt = tcp_pkt0.copy() + exp_pkt = tcp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.7" + exp_pkt['IP'].src = "20.0.0.7" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, True) + + test_name = 'Test #21 - ICMP source IP match, IP_PROTOCOL=0x1 - dropped' + direction + pkt = icmp_pkt0.copy() + exp_pkt = icmp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.8" + exp_pkt['IP'].src = "20.0.0.8" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) + + test_name = 'Test #22 - UDP source IP match, IP_PROTOCOL=0x11 - dropped' + direction + pkt = udp_pkt0.copy() + exp_pkt = udp_exp_pkt0.copy() + pkt['IP'].src = "20.0.0.8" + exp_pkt['IP'].src = "20.0.0.8" + self.runAclTestCase(test_name, pkt, src_ports, exp_pkt, dst_ports, False) def runTest(self): """ @summary: Crete and send packet to verify each ACL rule """ - test_result = False - - self.switch_info = open(self.test_params["switch_info"], 'r').readlines() - if self.testbed_type in [ 't1', 't1-lag', 't1-64-lag', 't1-64-lag-clet' ]: - self.tor_ports = map(int, self.switch_info[0].rstrip(",\n").split(",")) - self.spine_ports = map(int, self.switch_info[1].rstrip(",\n").split(",")) - self.dest_ip_addr_spine = self.switch_info[2].strip() - self.dest_ip_addr_spine_blocked = self.switch_info[3].strip() - self.dest_ip_addr_tor = self.switch_info[4].strip() - self.dest_ip_addr_tor_blocked = self.switch_info[5].strip() - - # Verify ACLs on tor port - (tests_passed, 
tests_total) = self.runAclTests(self.dest_ip_addr_spine, self.dest_ip_addr_spine_blocked, self.tor_ports[0], self.spine_ports) - assert(tests_passed == tests_total) - - # Verify ACLs on spine port - (tests_passed, tests_total) = self.runAclTests(self.dest_ip_addr_tor, self.dest_ip_addr_tor_blocked, self.spine_ports[0], self.tor_ports) - assert(tests_passed == tests_total) - elif self.testbed_type == 't0': - src_port = map(int, self.switch_info[0].rstrip(",\n").split(",")) - dst_ports = map(int, self.switch_info[1].rstrip(",\n").split(",")) - dst_ip = self.switch_info[2].strip() - dst_ip_blocked = self.switch_info[3].strip() - - (tests_passed, tests_total) = self.runAclTests(dst_ip, dst_ip_blocked, src_port[0], dst_ports) - assert(tests_passed == tests_total) - - + # Inject packets into TOR ports, check on SPINE ports + self.runAclTests(self.dst_ip_spine, + self.dst_ip_spine_forwarded, + self.dst_ip_spine_blocked, + self.tor_ports, + self.spine_ports, + "tor->spine") + + # Inject packets into SPINE ports, check on TOR ports + self.runAclTests(self.dst_ip_tor, + self.dst_ip_tor_forwarded, + self.dst_ip_tor_blocked, + self.spine_ports, + self.tor_ports, + "spine->tor") + + failed_cases = filter(lambda r: not r['result'], self.test_results) + if len(failed_cases) == 0: + print('!!!! All test cases passed! !!!!') + assert (len(failed_cases) == 0), "TEST FAILED. 
Failed test cases: " + str(failed_cases) diff --git a/ansible/roles/test/files/acstests/everflow_policer_test.py b/ansible/roles/test/files/acstests/everflow_policer_test.py index d89c340cc57..05a180e2451 100644 --- a/ansible/roles/test/files/acstests/everflow_policer_test.py +++ b/ansible/roles/test/files/acstests/everflow_policer_test.py @@ -6,7 +6,10 @@ ''' +import sys import time +import logging + import ptf import ptf.packet as scapy import ptf.dataplane as dataplane @@ -14,10 +17,13 @@ from ptf.base_tests import BaseTest from ptf.mask import Mask +logger = logging.getLogger('EverflowPolicerTest') + class EverflowPolicerTest(BaseTest): GRE_PROTOCOL_NUMBER = 47 NUM_OF_TOTAL_PACKETS = 500 + METER_TYPES = ['packets', 'bytes'] def __init__(self): @@ -43,6 +49,69 @@ def greFilter(self, pkt_str): return False + def getCBSRefillTime(self): + ''' + @summary: Gets Committed Burst Size (CBS) bucket refill time + + Note: + Committed Burst Size (CBS) refills at Committed Information Rate (CIR) speed. 
+ Example: meter_type=packets, CBS=100 pkts, CIR=100 pkt/sec + refill_time = CBS/CIR = 100 pkts / 100 pkt/sec = 1 sec + ''' + + return self.cbs / self.cir + + + def setupLogging(self): + handler = logging.StreamHandler(sys.stdout) + logger.addHandler(handler) + + + def logParams(self): + ''' + @summary: Pretty prints test parameters + ''' + + logger.info("#") + logger.info("# Params") + logger.info("#") + + msg = "hwsku={}".format(self.hwsku) + logger.info(msg) + msg = "asic_type={}".format(self.asic_type) + logger.info(msg) + msg = "router_mac={}".format(self.router_mac) + logger.info(msg) + msg = "mirror_stage={}".format(self.mirror_stage) + logger.info(msg) + msg = "session_src_ip={}".format(self.session_src_ip) + logger.info(msg) + msg = "session_dst_ip={}".format(self.session_dst_ip) + logger.info(msg) + msg = "session_ttl={}".format(self.session_ttl) + logger.info(msg) + msg = "session_dscp={}".format(self.session_dscp) + logger.info(msg) + msg = "src_port={}".format(self.src_port) + logger.info(msg) + msg = "dst_mirror_ports={}".format(str(self.dst_mirror_ports)) + logger.info(msg) + msg = "dst_ports={}".format(str(self.dst_ports)) + logger.info(msg) + msg = "meter_type={}".format(self.meter_type) + logger.info(msg) + msg = "cir={}".format(self.cir) + logger.info(msg) + msg = "cbs={}".format(self.cbs) + logger.info(msg) + msg = "tolerance={}".format(self.tolerance) + logger.info(msg) + msg = "min_range={}".format(self.min_range) + logger.info(msg) + msg = "max_range={}".format(self.max_range) + logger.info(msg) + + def setUp(self): ''' @summary: Setup the test @@ -53,13 +122,28 @@ def setUp(self): self.hwsku = self.test_params['hwsku'] self.asic_type = self.test_params['asic_type'] self.router_mac = self.test_params['router_mac'] - self.session_src_ip = "1.1.1.1" - self.session_dst_ip = "2.2.2.2" - self.session_ttl = 1 - self.session_dscp = 8 + self.mirror_stage = self.test_params['mirror_stage'] + self.session_src_ip = self.test_params['session_src_ip'] + 
self.session_dst_ip = self.test_params['session_dst_ip'] + self.session_ttl = int(self.test_params['session_ttl']) + self.session_dscp = int(self.test_params['session_dscp']) self.src_port = int(self.test_params['src_port']) self.dst_mirror_ports = [int(p) for p in self.test_params['dst_mirror_ports'].split(",") if p] self.dst_ports = [int(p) for p in self.test_params['dst_ports'].split(",")] + self.meter_type = self.test_params['meter_type'] + self.cir = int(self.test_params['cir']) + self.cbs = int(self.test_params['cbs']) + self.tolerance = int(self.test_params['tolerance']) + + assert_str = "meter_type({0}) not in {1}".format(self.meter_type, str(self.METER_TYPES)) + assert self.meter_type in self.METER_TYPES, assert_str + assert_str = "cir({}) > 0".format(self.cir) + assert self.cir > 0, assert_str + assert_str = "cbs({}) > 0".format(self.cbs) + assert self.cbs > 0, assert_str + + self.min_range = self.cbs - (self.cbs / 100) * self.tolerance + self.max_range = self.cbs + (self.cbs / 100) * self.tolerance self.base_pkt = testutils.simple_tcp_packet( eth_dst = self.router_mac, @@ -71,6 +155,9 @@ def setUp(self): ip_dscp = 9, ip_ttl = 64) + self.setupLogging() + self.logParams() + def checkOriginalFlow(self): """ @summary: Send traffic & check how many original packets are received @@ -92,9 +179,11 @@ def checkOriginalFlow(self): if rcv_pkt is not None: count += 1 elif count == 0: - print "The first original packet is not recieved" - assert False # Fast failure without waiting for full iteration - print "Recieved " + str(count) + " original packets" + assert_str = "The first original packet is not recieved" + assert count > 0, assert_str # Fast failure without waiting for full iteration + + logger.info("Recieved {} original packets".format(count)) + return count def checkMirroredFlow(self): @@ -106,11 +195,23 @@ def checkMirroredFlow(self): Mellanox crafts the GRE packets with extra information: That is: 22 bytes extra information after the GRE header """ - 
payload = self.base_pkt + payload = self.base_pkt.copy() + payload_mask = Mask(payload) + + if self.mirror_stage == "egress": + payload['Ethernet'].src = self.router_mac + payload['IP'].ttl -= 1 + payload_mask.set_do_not_care_scapy(scapy.Ether, "dst") + payload_mask.set_do_not_care_scapy(scapy.IP, "chksum") + if self.asic_type in ["mellanox"]: import binascii payload = binascii.unhexlify("0"*44) + str(payload) # Add the padding + if self.asic_type in ["barefoot"]: + import binascii + payload = binascii.unhexlify("0"*24) + str(payload) # Add the padding + exp_pkt = testutils.simple_gre_packet( eth_src = self.router_mac, ip_src = self.session_src_ip, @@ -123,6 +224,8 @@ def checkMirroredFlow(self): if self.asic_type in ["mellanox"]: exp_pkt['GRE'].proto = 0x8949 # Mellanox specific + elif self.asic_type in ["barefoot"]: + exp_pkt['GRE'].proto = 0x22eb # Barefoot specific else: exp_pkt['GRE'].proto = 0x88be @@ -130,22 +233,34 @@ def checkMirroredFlow(self): masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "flags") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + masked_exp_pkt.set_do_not_care(38*8, len(payload)*8) # don't match payload, payload will be matched by match_payload(pkt) - if self.asic_type in ["mellanox"]: - masked_exp_pkt.set_do_not_care(304, 176) # Mask the Mellanox specific inner header + def match_payload(pkt): + if self.asic_type in ["mellanox"]: + pkt = scapy.Ether(pkt).load + pkt = pkt[22:] # Mask the Mellanox specific inner header + pkt = scapy.Ether(pkt) + else: + pkt = scapy.Ether(pkt)[scapy.GRE].payload + + return dataplane.match_exp_pkt(payload_mask, pkt) self.dataplane.flush() count = 0 + testutils.send_packet(self, self.src_port, self.base_pkt, count=self.NUM_OF_TOTAL_PACKETS) for i in range(0,self.NUM_OF_TOTAL_PACKETS): - testutils.send_packet(self, self.src_port, self.base_pkt) (rcv_device, rcv_port, rcv_pkt, pkt_time) = testutils.dp_poll(self, timeout=0.1, 
exp_pkt=masked_exp_pkt) - if rcv_pkt is not None: + if rcv_pkt is not None and match_payload(rcv_pkt): count += 1 elif count == 0: - print "The first mirrored packet is not recieved" - assert False # Fast failure without waiting for full iteration - print "Received " + str(count) + " mirrored packets after rate limiting" + assert_str = "The first mirrored packet is not recieved" + assert count > 0, assert_str # Fast failure without waiting for full iteration + else: + break # No more packets available + + logger.info("Received {} mirrored packets after rate limiting".format(count)) + return count @@ -154,16 +269,25 @@ def runTest(self): @summary: Run EVERFLOW Policer Test """ + logger.info("#") + logger.info("# Run test") + logger.info("#") + # Send traffic and verify the original traffic is not rate limited count = self.checkOriginalFlow() assert count == self.NUM_OF_TOTAL_PACKETS - # Sleep for t=CBS/CIR=(100packets)/(100packets/s)=1s to refill CBS capacity after checkOriginalFlow() + # Verify packet policing is used + assert_str = "Non packet policing is not supported" + assert self.meter_type == "packets", assert_str + + # Sleep for t=CBS/CIR to refill CBS capacity after checkOriginalFlow() # otherwise we can have first mirrored packet dropped by policer in checkMirroredFlow() - time.sleep(1) + time.sleep(self.getCBSRefillTime()) testutils.add_filter(self.greFilter) # Send traffic and verify the mirroed traffic is rate limited count = self.checkMirroredFlow() - assert count > 100 and count < self.NUM_OF_TOTAL_PACKETS # cbs = cir = 100 + assert_str = "min({1}) <= count({0}) <= max({2})".format(count, self.min_range, self.max_range) + assert count >= self.min_range and count <= self.max_range, assert_str diff --git a/ansible/roles/test/files/acstests/everflow_tb_test.py b/ansible/roles/test/files/acstests/everflow_tb_test.py index 26550f1d080..c717b32db5c 100644 --- a/ansible/roles/test/files/acstests/everflow_tb_test.py +++ 
b/ansible/roles/test/files/acstests/everflow_tb_test.py @@ -76,6 +76,9 @@ def setUp(self): self.src_port = int(float(self.test_params['src_port'])) self.dst_ports = [int(float(p)) for p in self.test_params['dst_ports'].split(",") if p] self.expected_dst_mac = self.test_params.get('expected_dst_mac', None) + self.expect_received = self.test_params.get('expect_received', True) + self.acl_stage = self.test_params.get('acl_stage', 'ingress') + self.mirror_stage = self.test_params.get('mirror_stage', 'ingress') testutils.add_filter(self.gre_type_filter) @@ -106,7 +109,7 @@ def receivePacketOnPorts(self, ports=[], device_number=0): return (match_index, rcv_pkt, received) - def runSendReceiveTest(self, pkt2send, src_port, destination_ports): + def sendReceive(self, pkt2send, src_port, destination_ports): """ @summary Send packet and verify it is received/not received on the expected ports """ @@ -142,14 +145,19 @@ def runSendReceiveTest(self, pkt2send, src_port, destination_ports): payload = str(scapy_pkt[scapy.GRE].payload) - if self.hwsku in ["ACS-MSN2700", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2740", "Mellanox-SN2700"]: + if self.asic_type in ["mellanox"]: payload = str(scapy_pkt[scapy.GRE].payload)[22:] if self.asic_type in ["barefoot"]: payload = str(scapy_pkt[scapy.GRE].payload)[12:] inner_pkt = scapy.Ether(payload) + if self.mirror_stage == 'egress': + pkt2send['IP'].ttl -= 1 # expect mirrored packet on egress has TTL decremented + masked_inner_pkt = Mask(inner_pkt) + masked_inner_pkt.set_do_not_care_scapy(scapy.Ether, "dst") + masked_inner_pkt.set_do_not_care_scapy(scapy.Ether, "src") if scapy.IP in inner_pkt: masked_inner_pkt.set_do_not_care_scapy(scapy.IP, "chksum") @@ -158,6 +166,12 @@ def runSendReceiveTest(self, pkt2send, src_port, destination_ports): return dataplane.match_exp_pkt(masked_inner_pkt, pkt2send) + def runSendReceiveTest(self, pkt, src_port, dst_ports): + if self.expect_received: + return self.sendReceive(pkt, src_port, dst_ports) + else: + 
return not self.sendReceive(pkt, src_port, dst_ports) + @reportResults("Verify SRC IP match") def verifySrcIp(self): diff --git a/ansible/roles/test/files/helpers/arp_responder.py b/ansible/roles/test/files/helpers/arp_responder.py index badbee6b91f..0012e13bb0d 100644 --- a/ansible/roles/test/files/helpers/arp_responder.py +++ b/ansible/roles/test/files/helpers/arp_responder.py @@ -7,7 +7,12 @@ import os.path from fcntl import ioctl from pprint import pprint - +import logging +logging.getLogger("scapy.runtime").setLevel(logging.ERROR) +import ptf.packet as scapy +import scapy.all as scapy2 +scapy2.conf.use_pcap=True +import scapy.arch.pcapdnet def hexdump(data): print " ".join("%02x" % ord(d) for d in data) @@ -39,18 +44,20 @@ def __del__(self): self.socket.close() def bind(self): - self.socket = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(self.ETH_P_ALL)) - self.socket.bind((self.iface, 0)) - self.socket.settimeout(self.RCV_TIMEOUT) + self.socket = scapy2.conf.L2listen(iface=self.iface, filter='arp') def handler(self): - return self.socket.fileno() + return self.socket def recv(self): - return self.socket.recv(self.RCV_SIZE) + sniffed = self.socket.recv() + pkt = sniffed[0] + str_pkt = str(pkt).encode("HEX") + binpkt = binascii.unhexlify(str_pkt) + return binpkt def send(self, data): - self.socket.send(data) + scapy2.sendp(data, iface=self.iface) def mac(self): return self.mac_address @@ -64,7 +71,7 @@ def __init__(self, interfaces, responder): self.responder = responder self.mapping = {} for interface in interfaces: - self.mapping[interface.handler()] = interface + self.mapping[interface.handler()] = interface def poll(self): handlers = self.mapping.keys() @@ -75,7 +82,7 @@ def poll(self): class ARPResponder(object): - ARP_PKT_LEN = 60 + ARP_PKT_LEN = 64 ARP_OP_REQUEST = 1 def __init__(self, ip_sets): self.arp_chunk = binascii.unhexlify('08060001080006040002') # defines a part of the packet for ARP Reply @@ -87,10 +94,10 @@ def __init__(self, 
ip_sets): def action(self, interface): data = interface.recv() - if len(data) != self.ARP_PKT_LEN: + if len(data) > self.ARP_PKT_LEN: return - remote_mac, remote_ip, request_ip, op_type = self.extract_arp_info(data) + remote_mac, remote_ip, request_ip, op_type, vlan_id = self.extract_arp_info(data) # Don't send ARP response if the ARP op code is not request if op_type != self.ARP_OP_REQUEST: @@ -99,18 +106,40 @@ def action(self, interface): request_ip_str = socket.inet_ntoa(request_ip) if request_ip_str not in self.ip_sets[interface.name()]: return - - arp_reply = self.generate_arp_reply(self.ip_sets[interface.name()][request_ip_str], remote_mac, request_ip, remote_ip) + arp_reply = self.generate_arp_reply(self.ip_sets[interface.name()][request_ip_str], remote_mac, request_ip, remote_ip, vlan_id) interface.send(arp_reply) return - + def extract_arp_info(self, data): # remote_mac, remote_ip, request_ip, op_type - return data[6:12], data[28:32], data[38:42], (ord(data[20]) * 256 + ord(data[21])) - - def generate_arp_reply(self, local_mac, remote_mac, local_ip, remote_ip): - return remote_mac + local_mac + self.arp_chunk + local_mac + local_ip + remote_mac + remote_ip + self.arp_pad + rem_ip_start = 28 + req_ip_start = 38 + op_type_start = 20 + eth_offset = 0 + vlan_id = None + ether_type = str(data[12:14]).encode("HEX") + if (ether_type == '8100'): + vlan = str(data[14:16]).encode("HEX") + if (vlan != '0000'): + eth_offset = 4 + vlan_id = data[14:16] + rem_ip_start = rem_ip_start + eth_offset + req_ip_start = req_ip_start + eth_offset + op_type_start = op_type_start + eth_offset + rem_ip_end = rem_ip_start + 4 + req_ip_end = req_ip_start + 4 + op_type_end = op_type_start + 1 + + return data[6:12], data[rem_ip_start:rem_ip_end], data[req_ip_start:req_ip_end], (ord(data[op_type_start]) * 256 + ord(data[op_type_end])), vlan_id + + def generate_arp_reply(self, local_mac, remote_mac, local_ip, remote_ip, vlan_id): + eth_hdr = remote_mac + local_mac + if vlan_id is not 
None: + eth_type = binascii.unhexlify('8100') + eth_hdr += eth_type + vlan_id + + return eth_hdr + self.arp_chunk + local_mac + local_ip + remote_mac + remote_ip + self.arp_pad def parse_args(): parser = argparse.ArgumentParser(description='ARP autoresponder') @@ -134,7 +163,13 @@ def main(): ip_sets = {} counter = 0 for iface, ip_dict in data.items(): - ip_sets[str(iface)] = {} + vlan = None + if iface.find('@') != -1: + iface, vlan = iface.split('@') + vlan_tag = format(int(vlan), 'x') + vlan_tag = vlan_tag.zfill(4) + if str(iface) not in ip_sets: + ip_sets[str(iface)] = {} if args.extended: for ip, mac in ip_dict.items(): ip_sets[str(iface)][str(ip)] = binascii.unhexlify(str(mac)) @@ -142,6 +177,8 @@ def main(): else: for ip in ip_dict: ip_sets[str(iface)][str(ip)] = get_mac(str(iface)) + if vlan is not None: + ip_sets[str(iface)]['vlan'] = binascii.unhexlify(vlan_tag) ifaces = [] for iface_name in ip_sets.keys(): diff --git a/ansible/roles/test/files/ptftests/IP_decap_test.py b/ansible/roles/test/files/ptftests/IP_decap_test.py index e00b3119d3a..9ce87cd8b0e 100644 --- a/ansible/roles/test/files/ptftests/IP_decap_test.py +++ b/ansible/roles/test/files/ptftests/IP_decap_test.py @@ -7,10 +7,13 @@ Precondition: Before the test start, all routes need to be defined as in the fib_info.txt file, in addition to the decap rule that need to be set as the dspc_mode -topology: Supports t1, t1-lag, t0-116 and t0 topology +topology: Supports all the variations of t0 and t1 topologies. 
Usage: Examples of how to start the test - ptf --test-dir /root/dor/ ip_decap_test_red --platform remote -t "verbose=True;fib_info='/root/fib_info.txt';lo_ip='10.1.0.32';router_mac='00:02:03:04:05:00';dscp_mode='pipe';ttl_mode='pipe';testbed_type='t1'" --log-dir /tmp/logs --verbose + ptf --test-dir /root/ptftest/ IP_decap_test.DecapPacketTest --platform-dir ptftests --platform remote \ + --qlen=1000 -t "verbose=True;fib_info='/root/fib_info.txt';lo_ip='10.1.0.32';\ + router_mac='00:02:03:04:05:00';dscp_mode='pipe';ttl_mode='pipe';testbed_type='t1';\ + vlan_ip='192.168.0.1';src_ports='1,2,3,4,5,6'" --log-file /tmp/logs --verbose Parameters: fib_info - The fib_info file location lo_ip - The loop_back IP that is configured in the decap rule @@ -25,6 +28,10 @@ inner_ipv6 - Test IPv6 encap packets outer_ipv4 - Test packets encapsulated in IPv4 outer_ipv6 - Test packets encapsulated in IPv6 + src_ports - The list of ports for injecting encapsulated packets. Separated by comma, for example: + "1,2,3,4,5,6" + vlan_ip - IPv4 address of the vlan interface. Required for t0 testbed type. + vlan_ipv6 - IPv6 address of the vlan interface. Optional. 
''' @@ -35,28 +42,23 @@ import random import time import logging -import ptf.packet as scapy import socket -import ptf.dataplane as dataplane - -from ptf.testutils import * -from ptf.mask import Mask -import ipaddress - import os import unittest +import ipaddress import ptf +import ptf.packet as scapy +import ptf.testutils as testutils +from ptf.testutils import simple_ip_only_packet, simple_tcpv6_packet, simple_ipv4ip_packet, simple_ipv6ip_packet +from ptf.testutils import send_packet, verify_packet_any_port +from ptf.mask import Mask from ptf.base_tests import BaseTest from ptf import config -import ptf.dataplane as dataplane -import ptf.testutils as testutils - -import pprint - import fib + class DecapPacketTest(BaseTest): """ IP in IP decapsulation test """ @@ -89,18 +91,7 @@ def setUp(self): self.dataplane = ptf.dataplane_instance self.router_mac = self.test_params['router_mac'] self.fib = fib.Fib(self.test_params['fib_info']) - if self.test_params['testbed_type'] == 't1' or self.test_params['testbed_type'] == 't1-lag': - self.src_ports = range(0, 32) - if self.test_params['testbed_type'] == 't1-64-lag' or self.test_params['testbed_type'] == 't1-64-lag-clet': - self.src_ports = [0, 1, 4, 5, 16, 17, 20, 21, 34, 36, 37, 38, 39, 42, 44, 45, 46, 47, 50, 52, 53, 54, 55, 58, 60, 61, 62, 63] - if self.test_params['testbed_type'] == 't0': - self.src_ports = range(1, 25) + range(28, 32) - if self.test_params['testbed_type'] == 't0-64': - self.src_ports = [0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 36, 37, 38, 39, 40, 41, 42, 48, 52, 53, 54, 55, 56, 57, 58] - if self.test_params['testbed_type'] == 't0-116': - self.src_ports = range(0, 24) + range(32, 120) - if self.test_params['testbed_type'] == 't0-52': - self.src_ports = range(0, 52) + self.src_ports = [int(port) for port in self.test_params['src_ports'].split(',')] # which type of tunneled trafic to test (IPv4 in IPv4, IPv6 in IPv4, IPv6 in IPv4, IPv6 
in IPv6) self.test_outer_ipv4 = self.test_params.get('outer_ipv4', True) @@ -108,6 +99,9 @@ def setUp(self): self.test_inner_ipv4 = self.test_params.get('inner_ipv4', True) self.test_inner_ipv6 = self.test_params.get('inner_ipv6', True) + self.vlan_ip = self.test_params.get('vlan_ip') + self.vlan_ipv6 = self.test_params.get('vlan_ipv6') + # Index of current DSCP and TTL value in allowed DSCP_RANGE and TTL_RANGE self.dscp_in_idx = 0 # DSCP of inner layer. self.dscp_out_idx = len(self.DSCP_RANGE) / 2 # DSCP of outer layer. Set different initial dscp_in and dscp_out @@ -352,6 +346,15 @@ def run_encap_combination_test(self, outer_pkt_type, inner_pkt_type): raise Exception('ERROR: Invalid inner packet type passed: ', inner_pkt_type) for ip_range in ip_ranges: + + # Skip the IP range on VLAN interface, t0 topology + if inner_pkt_type == 'ipv4' and self.vlan_ip and \ + ip_range.contains(ipaddress.ip_address(unicode(self.vlan_ip))): + continue + elif inner_pkt_type == 'ipv6' and self.vlan_ipv6 and \ + ip_range.contains(ipaddress.ip_address(unicode(self.vlan_ipv6))): + continue + # Get the expected list of ports that would receive the packets exp_port_list = self.fib[ip_range.get_first_ip()].get_next_hop_list() # Choose random one source port from all ports excluding the expected ones diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py index dd01345a2a9..74fc58690a9 100644 --- a/ansible/roles/test/files/ptftests/advanced-reboot.py +++ b/ansible/roles/test/files/ptftests/advanced-reboot.py @@ -109,6 +109,7 @@ def is_flooding(self): class ReloadTest(BaseTest): TIMEOUT = 0.5 + PKT_TOUT = 1 VLAN_BASE_MAC_PATTERN = '72060001{:04}' LAG_BASE_MAC_PATTERN = '5c010203{:04}' SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024 @@ -122,6 +123,7 @@ def __init__(self): self.log_lock = threading.RLock() self.vm_handle = None self.sad_handle = None + self.process_id = str(os.getpid()) self.test_params = testutils.test_params_get() 
self.check_param('verbose', False, required=False) self.check_param('dut_username', '', required=True) @@ -129,7 +131,7 @@ def __init__(self): self.check_param('dut_hostname', '', required=True) self.check_param('reboot_limit_in_seconds', 30, required=False) self.check_param('reboot_type', 'fast-reboot', required=False) - self.check_param('graceful_limit', 180, required=False) + self.check_param('graceful_limit', 240, required=False) self.check_param('portchannel_ports_file', '', required=True) self.check_param('vlan_ports_file', '', required=True) self.check_param('ports_file', '', required=True) @@ -149,6 +151,9 @@ def __init__(self): self.check_param('nexthop_ips', [], required = False) # nexthops for the routes that will be added during warm-reboot self.check_param('allow_vlan_flooding', False, required = False) self.check_param('sniff_time_incr', 60, required = False) + self.check_param('vnet', False, required = False) + self.check_param('vnet_pkts', None, required = False) + self.check_param('target_version', '', required = False) if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None': self.test_params['preboot_oper'] = None if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None': @@ -166,6 +171,11 @@ def __init__(self): self.log_file_name = '/tmp/%s.log' % self.test_params['reboot_type'] self.log_fp = open(self.log_file_name, 'w') + self.packets_list = [] + self.vnet = self.test_params['vnet'] + if (self.vnet): + self.packets_list = json.load(open(self.test_params['vnet_pkts'])) + # a flag whether to populate FDB by sending traffic from simulated servers # usually ARP responder will make switch populate its FDB table, but Mellanox on 201803 has # no L3 ARP support, so this flag is used to W/A this issue @@ -277,6 +287,7 @@ def log(self, message, verbose=False): if verbose and self.test_params['verbose'] or not verbose: print "%s : %s" % (current_time, message) self.log_fp.write("%s : %s\n" % 
(current_time, message)) + self.log_fp.flush() def timeout(self, func, seconds, message): async_res = self.pool.apply_async(func) @@ -483,7 +494,10 @@ def setUp(self): self.dut_mac = self.test_params['dut_mac'] # get VM info - arista_vms = self.test_params['arista_vms'][1:-1].split(",") + if isinstance(self.test_params['arista_vms'], list): + arista_vms = self.test_params['arista_vms'] + else: + arista_vms = self.test_params['arista_vms'][1:-1].split(",") self.ssh_targets = [] for vm in arista_vms: if (vm.startswith("'") or vm.startswith('"')) and (vm.endswith("'") or vm.endswith('"')): @@ -528,7 +542,8 @@ def setUp(self): # Pre-generate list of packets to be sent in send_in_background method. generate_start = datetime.datetime.now() - self.generate_bidirectional() + if not self.vnet: + self.generate_bidirectional() self.log("%d packets are ready after: %s" % (len(self.packets_list), str(datetime.datetime.now() - generate_start))) self.dataplane = ptf.dataplane_instance @@ -723,245 +738,263 @@ def generate_bidirectional(self): from_port = src_port self.packets_list.append((from_port, str(packet))) - def runTest(self): - self.reboot_start = None - no_routing_start = None - no_routing_stop = None - no_cp_replies = None - upper_replies = [] - routing_always = False + def put_nowait(self, queue, data): + try: + queue.put_nowait(data) + except Queue.Full: + pass + def pre_reboot_test_setup(self): + self.reboot_start = None + self.no_routing_start = None + self.no_routing_stop = None + self.no_cp_replies = None + self.upper_replies = [] + self.routing_always = False self.ssh_jobs = [] for addr in self.ssh_targets: - q = Queue.Queue() + q = Queue.Queue(1) thr = threading.Thread(target=self.peer_state_check, kwargs={'ip': addr, 'queue': q}) thr.setDaemon(True) self.ssh_jobs.append((thr, q)) thr.start() - thr = threading.Thread(target=self.reboot_dut) - thr.setDaemon(True) + if self.setup_fdb_before_test: + self.log("Run some server traffic to populate FDB table...") + 
self.setup_fdb() + + self.log("Starting reachability state watch thread...") + self.watching = True + self.light_probe = False + self.watcher_is_stopped = threading.Event() # Waiter Event for the Watcher state is stopped. + self.watcher_is_running = threading.Event() # Waiter Event for the Watcher state is running. + self.watcher_is_stopped.set() # By default the Watcher is not running. + self.watcher_is_running.clear() # By default its required to wait for the Watcher started. + # Give watch thread some time to wind up + watcher = self.pool.apply_async(self.reachability_watcher) + time.sleep(5) + + def wait_until_reboot(self): + self.log("Wait until Control plane is down") + self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shutdown in {} seconds".format(self.task_timeout)) + if self.reboot_type == 'fast-reboot': + self.light_probe = True + else: + # add or del routes during boot + self.do_inboot_oper() + self.reboot_start = datetime.datetime.now() + self.log("Dut reboots: reboot start %s" % str(self.reboot_start)) + + def handle_fast_reboot_health_check(self): + self.log("Check that device is still forwarding data plane traffic") + self.fails['dut'].add("Data plane has a forwarding problem after CPU went down") + self.check_alive() + self.fails['dut'].clear() - try: - if self.setup_fdb_before_test: - self.log("Run some server traffic to populate FDB table...") - self.setup_fdb() - - - self.log("Starting reachability state watch thread...") - self.watching = True - self.light_probe = False - self.watcher_is_stopped = threading.Event() # Waiter Event for the Watcher state is stopped. - self.watcher_is_running = threading.Event() # Waiter Event for the Watcher state is running. - self.watcher_is_stopped.set() # By default the Watcher is not running. - self.watcher_is_running.clear() # By default its required to wait for the Watcher started. 
- # Give watch thread some time to wind up - watcher = self.pool.apply_async(self.reachability_watcher) - time.sleep(5) + self.log("Wait until control plane up") + async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up) - self.log("Check that device is alive and pinging") - self.fails['dut'].add("DUT is not ready for test") - self.wait_dut_to_warm_up() - self.fails['dut'].clear() + self.log("Wait until data plane stops") + async_forward_stop = self.pool.apply_async(self.check_forwarding_stop) - self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay) - thr.start() + try: + async_cpu_up.get(timeout=self.task_timeout) + except TimeoutError as e: + self.log("DUT hasn't bootup in %d seconds" % self.task_timeout) + self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout) + raise - self.log("Wait until Control plane is down") - self.timeout(self.wait_until_cpu_port_down, self.task_timeout, "DUT hasn't shutdown in {} seconds".format(self.task_timeout)) + try: + self.no_routing_start, self.upper_replies = async_forward_stop.get(timeout=self.task_timeout) + self.log("Data plane was stopped, Waiting until it's up. 
Stop time: %s" % str(self.no_routing_start)) + except TimeoutError: + self.log("Data plane never stop") + self.routing_always = True + self.upper_replies = [self.nr_vl_pkts] + + if self.no_routing_start is not None: + self.no_routing_stop, _ = self.timeout(self.check_forwarding_resume, + self.task_timeout, + "DUT hasn't started to work for %d seconds" % self.task_timeout) + else: + self.no_routing_stop = datetime.datetime.min + self.no_routing_start = datetime.datetime.min - if self.reboot_type == 'fast-reboot': - self.light_probe = True - else: - # add or del routes during boot - self.do_inboot_oper() + # Stop watching DUT + self.watching = False - self.reboot_start = datetime.datetime.now() - self.log("Dut reboots: reboot start %s" % str(self.reboot_start)) + def handle_warm_reboot_health_check(self): + self.send_and_sniff() - if self.reboot_type == 'fast-reboot': - self.log("Check that device is still forwarding data plane traffic") - self.fails['dut'].add("Data plane has a forwarding problem after CPU went down") - self.check_alive() - self.fails['dut'].clear() - - self.log("Wait until control plane up") - async_cpu_up = self.pool.apply_async(self.wait_until_cpu_port_up) - - self.log("Wait until data plane stops") - async_forward_stop = self.pool.apply_async(self.check_forwarding_stop) - - try: - async_cpu_up.get(timeout=self.task_timeout) - except TimeoutError as e: - self.log("DUT hasn't bootup in %d seconds" % self.task_timeout) - self.fails['dut'].add("DUT hasn't booted up in %d seconds" % self.task_timeout) - raise - - try: - no_routing_start, upper_replies = async_forward_stop.get(timeout=self.task_timeout) - self.log("Data plane was stopped, Waiting until it's up. 
Stop time: %s" % str(no_routing_start)) - except TimeoutError: - self.log("Data plane never stop") - routing_always = True - upper_replies = [self.nr_vl_pkts] - - if no_routing_start is not None: - no_routing_stop, _ = self.timeout(self.check_forwarding_resume, - self.task_timeout, - "DUT hasn't started to work for %d seconds" % self.task_timeout) - else: - no_routing_stop = datetime.datetime.min - no_routing_start = datetime.datetime.min + # Stop watching DUT + self.watching = False + self.log("Stopping reachability state watch thread.") + self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped. - # Stop watching DUT - self.watching = False + self.save_sniffed_packets() - if self.reboot_type == 'warm-reboot': - self.send_and_sniff() + examine_start = datetime.datetime.now() + self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start)) + self.examine_flow() + self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start)) - # Stop watching DUT - self.watching = False - self.log("Stopping reachability state watch thread.") - self.watcher_is_stopped.wait(timeout = 10) # Wait for the Watcher stopped. + if self.lost_packets: + self.no_routing_stop, self.no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start) + self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id)) + self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. 
Total %d packet(s) lost" % \ + (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets)) + else: + self.no_routing_start = self.reboot_start + self.no_routing_stop = self.reboot_start - self.save_sniffed_packets() + def handle_post_reboot_health_check(self): + # wait until all bgp session are established + self.log("Wait until bgp routing is up on all devices") + for _, q in self.ssh_jobs: + q.put('quit') - examine_start = datetime.datetime.now() - self.log("Packet flow examine started %s after the reboot" % str(examine_start - self.reboot_start)) - self.examine_flow() - self.log("Packet flow examine finished after %s" % str(datetime.datetime.now() - examine_start)) + def wait_for_ssh_threads(): + while any(thr.is_alive() for thr, _ in self.ssh_jobs): + time.sleep(self.TIMEOUT) - if self.lost_packets: - no_routing_stop, no_routing_start = datetime.datetime.fromtimestamp(self.no_routing_stop), datetime.datetime.fromtimestamp(self.no_routing_start) - self.log("The longest disruption lasted %.3f seconds. %d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id)) - self.log("Total disruptions count is %d. All disruptions lasted %.3f seconds. Total %d packet(s) lost" % \ - (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets)) - else: - no_routing_start = self.reboot_start - no_routing_stop = self.reboot_start + for thr, _ in self.ssh_jobs: + thr.join() - # wait until all bgp session are established - self.log("Wait until bgp routing is up on all devices") - for _, q in self.ssh_jobs: - q.put('quit') + self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout) - def wait_for_ssh_threads(): - while any(thr.is_alive() for thr, _ in self.ssh_jobs): - for _, q in self.ssh_jobs: - q.put('go') - time.sleep(self.TIMEOUT) + self.log("Data plane works again. 
Start time: %s" % str(self.no_routing_stop)) + self.log("") - for thr, _ in self.ssh_jobs: - thr.join() + if self.reboot_type == 'fast-reboot': + self.no_cp_replies = self.extract_no_cpu_replies(self.upper_replies) - self.timeout(wait_for_ssh_threads, self.task_timeout, "SSH threads haven't finished for %d seconds" % self.task_timeout) + if self.no_routing_stop - self.no_routing_start > self.limit: + self.fails['dut'].add("Longest downtime period must be less then %s seconds. It was %s" \ + % (self.test_params['reboot_limit_in_seconds'], str(self.no_routing_stop - self.no_routing_start))) + if self.no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']): + self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit'])) + if self.reboot_type == 'fast-reboot' and self.no_cp_replies < 0.95 * self.nr_vl_pkts: + self.fails['dut'].add("Dataplane didn't route to all servers, when control-plane was down: %d vs %d" % (self.no_cp_replies, self.nr_vl_pkts)) - self.log("Data plane works again. Start time: %s" % str(no_routing_stop)) - self.log("") + if self.reboot_type == 'warm-reboot': + if self.total_disrupt_time > self.limit.total_seconds(): + self.fails['dut'].add("Total downtime period must be less then %s seconds. It was %s" \ + % (str(self.limit), str(self.total_disrupt_time))) - if self.reboot_type == 'fast-reboot': - no_cp_replies = self.extract_no_cpu_replies(upper_replies) + # after the data plane is up, check for routing changes + if self.test_params['inboot_oper'] and self.sad_handle: + self.check_inboot_sad_status() - if no_routing_stop - no_routing_start > self.limit: - self.fails['dut'].add("Downtime must be less then %s seconds. 
It was %s" \ - % (self.test_params['reboot_limit_in_seconds'], str(no_routing_stop - no_routing_start))) - if no_routing_stop - self.reboot_start > datetime.timedelta(seconds=self.test_params['graceful_limit']): - self.fails['dut'].add("%s cycle must be less than graceful limit %s seconds" % (self.reboot_type, self.test_params['graceful_limit'])) - if self.reboot_type == 'fast-reboot' and no_cp_replies < 0.95 * self.nr_vl_pkts: - self.fails['dut'].add("Dataplane didn't route to all servers, when control-plane was down: %d vs %d" % (no_cp_replies, self.nr_vl_pkts)) + # postboot check for all preboot operations + if self.test_params['preboot_oper'] and self.sad_handle: + self.check_postboot_sad_status() - if self.reboot_type == 'warm-reboot': - # after the data plane is up, check for routing changes - if self.test_params['inboot_oper'] and self.sad_handle: - self.check_inboot_sad_status() + else: + # verify there are no interface flaps after warm boot + self.neigh_lag_status_check() - # postboot check for all preboot operations - if self.test_params['preboot_oper'] and self.sad_handle: - self.check_postboot_sad_status() + def handle_post_reboot_test_reports(self): + # Stop watching DUT + self.watching = False + # revert to pretest state + if self.sad_oper and self.sad_handle: + self.sad_revert() + if self.test_params['inboot_oper']: + self.check_postboot_sad_status() + self.log(" ") + # Generating report + self.log("="*50) + self.log("Report:") + self.log("="*50) + + self.log("LACP/BGP were down for (extracted from cli):") + self.log("-"*50) + for ip in sorted(self.cli_info.keys()): + self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \ + % (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \ + self.cli_info[ip]['po'][1], \ + self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\ + self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0])) + + self.log("-"*50) + self.log("Extracted from VM logs:") + 
self.log("-"*50) + for ip in sorted(self.logs_info.keys()): + self.log("Extracted log info from %s" % ip) + for msg in sorted(self.logs_info[ip].keys()): + if not msg in [ 'error', 'route_timeout' ]: + self.log(" %s : %d" % (msg, self.logs_info[ip][msg])) else: - # verify there are no interface flaps after warm boot - self.neigh_lag_status_check() + self.log(" %s" % self.logs_info[ip][msg]) + self.log("-"*50) - except Exception as e: - self.fails['dut'].add(e) - finally: - # Stop watching DUT - self.watching = False - - # revert to pretest state - if self.sad_oper and self.sad_handle: - self.sad_revert() - if self.test_params['inboot_oper']: - self.check_postboot_sad_status() - self.log(" ") + self.log("Summary:") + self.log("-"*50) - # Generating report - self.log("="*50) - self.log("Report:") - self.log("="*50) + if self.no_routing_stop: + self.log("Longest downtime period was %s" % str(self.no_routing_stop - self.no_routing_start)) + reboot_time = "0:00:00" if self.routing_always else str(self.no_routing_stop - self.reboot_start) + self.log("Reboot time was %s" % reboot_time) + self.log("Expected downtime is less then %s" % self.limit) - self.log("LACP/BGP were down for (extracted from cli):") - self.log("-"*50) - for ip in sorted(self.cli_info.keys()): - self.log(" %s - lacp: %7.3f (%d) po_events: (%d) bgp v4: %7.3f (%d) bgp v6: %7.3f (%d)" \ - % (ip, self.cli_info[ip]['lacp'][1], self.cli_info[ip]['lacp'][0], \ - self.cli_info[ip]['po'][1], \ - self.cli_info[ip]['bgp_v4'][1], self.cli_info[ip]['bgp_v4'][0],\ - self.cli_info[ip]['bgp_v6'][1], self.cli_info[ip]['bgp_v6'][0])) + if self.reboot_type == 'fast-reboot' and self.no_cp_replies: + self.log("How many packets were received back when control plane was down: %d Expected: %d" % (self.no_cp_replies, self.nr_vl_pkts)) + has_info = any(len(info) > 0 for info in self.info.values()) + if has_info: self.log("-"*50) - self.log("Extracted from VM logs:") + self.log("Additional info:") self.log("-"*50) - for ip in 
sorted(self.logs_info.keys()): - self.log("Extracted log info from %s" % ip) - for msg in sorted(self.logs_info[ip].keys()): - if not msg in [ 'error', 'route_timeout' ]: - self.log(" %s : %d" % (msg, self.logs_info[ip][msg])) - else: - self.log(" %s" % self.logs_info[ip][msg]) - self.log("-"*50) - - self.log("Summary:") + for name, info in self.info.items(): + for entry in info: + self.log("INFO:%s:%s" % (name, entry)) self.log("-"*50) - if no_routing_stop: - self.log("Downtime was %s" % str(no_routing_stop - no_routing_start)) - reboot_time = "0:00:00" if routing_always else str(no_routing_stop - self.reboot_start) - self.log("Reboot time was %s" % reboot_time) - self.log("Expected downtime is less then %s" % self.limit) + is_good = all(len(fails) == 0 for fails in self.fails.values()) - if self.reboot_type == 'fast-reboot' and no_cp_replies: - self.log("How many packets were received back when control plane was down: %d Expected: %d" % (no_cp_replies, self.nr_vl_pkts)) + errors = "" + if not is_good: + self.log("-"*50) + self.log("Fails:") + self.log("-"*50) - has_info = any(len(info) > 0 for info in self.info.values()) - if has_info: - self.log("-"*50) - self.log("Additional info:") - self.log("-"*50) - for name, info in self.info.items(): - for entry in info: - self.log("INFO:%s:%s" % (name, entry)) - self.log("-"*50) + errors = "\n\nSomething went wrong. Please check output below:\n\n" + for name, fails in self.fails.items(): + for fail in fails: + self.log("FAILED:%s:%s" % (name, fail)) + errors += "FAILED:%s:%s\n" % (name, fail) - is_good = all(len(fails) == 0 for fails in self.fails.values()) + self.log("="*50) - errors = "" - if not is_good: - self.log("-"*50) - self.log("Fails:") - self.log("-"*50) + self.assertTrue(is_good, errors) - errors = "\n\nSomething went wrong. 
Please check output below:\n\n" - for name, fails in self.fails.items(): - for fail in fails: - self.log("FAILED:%s:%s" % (name, fail)) - errors += "FAILED:%s:%s\n" % (name, fail) + def runTest(self): + self.pre_reboot_test_setup() + try: + self.log("Check that device is alive and pinging") + self.fails['dut'].add("DUT is not ready for test") + self.wait_dut_to_warm_up() + self.fails['dut'].clear() - self.log("="*50) + self.log("Schedule to reboot the remote switch in %s sec" % self.reboot_delay) + thr = threading.Thread(target=self.reboot_dut) + thr.setDaemon(True) + thr.start() - self.assertTrue(is_good, errors) + self.wait_until_reboot() + if self.reboot_type == 'fast-reboot': + self.handle_fast_reboot_health_check() + if self.reboot_type == 'warm-reboot': + self.handle_warm_reboot_health_check() + self.handle_post_reboot_health_check() + + # Check sonic version after reboot + self.check_sonic_version_after_reboot() + except Exception as e: + self.fails['dut'].add(e) + finally: + self.handle_post_reboot_test_reports() def neigh_lag_status_check(self): """ @@ -978,6 +1011,19 @@ def neigh_lag_status_check(self): else: self.fails[neigh].add("LAG flapped %s times on %s after warm boot" % (flap_cnt, neigh)) + def check_sonic_version_after_reboot(self): + # Check sonic version after reboot + target_version = self.test_params['target_version'] + if target_version: + stdout, stderr, return_code = self.dut_connection.execCommand("sudo sonic_installer list | grep Current | awk '{print $2}'") + current_version = "" + if stdout != []: + current_version = str(stdout[0]).replace('\n', '') + self.log("Current={} Target={}".format(current_version, target_version)) + if current_version != target_version: + raise Exception("Sonic upgrade failed. 
Target={} Current={}".format(\ + target_version, current_version)) + def extract_no_cpu_replies(self, arr): """ This function tries to extract number of replies from dataplane, when control plane is non working @@ -995,7 +1041,7 @@ def reboot_dut(self): time.sleep(self.reboot_delay) self.log("Rebooting remote side") - stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type, timeout=self.task_timeout) + stdout, stderr, return_code = self.dut_connection.execCommand("sudo " + self.reboot_type) if stdout != []: self.log("stdout from %s: %s" % (self.reboot_type, str(stdout))) if stderr != []: @@ -1025,7 +1071,7 @@ def peer_state_check(self, ip, queue): def wait_until_cpu_port_down(self): while True: for _, q in self.ssh_jobs: - q.put('go') + self.put_nowait(q, 'cpu_down') if self.cpu_state.get() == 'down': break time.sleep(self.TIMEOUT) @@ -1033,7 +1079,7 @@ def wait_until_cpu_port_down(self): def wait_until_cpu_port_up(self): while True: for _, q in self.ssh_jobs: - q.put('go') + self.put_nowait(q, 'cpu_up') if self.cpu_state.get() == 'up': break time.sleep(self.TIMEOUT) @@ -1062,7 +1108,10 @@ def send_in_background(self, packets_list = None, interval = None): self.log("Sender started at %s" % str(sender_start)) for entry in packets_list: time.sleep(interval) - testutils.send_packet(self, *entry) + if self.vnet: + testutils.send_packet(self, entry[0], entry[1].decode("base64")) + else: + testutils.send_packet(self, *entry) self.log("Sender has been running for %s" % str(datetime.datetime.now() - sender_start)) # Remove filter self.apply_filter_all_ports('') @@ -1165,6 +1214,22 @@ def examine_flow(self, filename = None): self.check_tcp_payload(pkt) and self.no_flood(pkt) ] + + if self.vnet: + decap_packets = [ scapyall.Ether(str(pkt.payload.payload.payload)[8:]) for pkt in all_packets if + scapyall.UDP in pkt and + pkt[scapyall.UDP].sport == 1234 + ] + filtered_decap_packets = [ pkt for pkt in decap_packets if + scapyall.TCP in pkt and + 
not scapyall.ICMP in pkt and + pkt[scapyall.TCP].sport == 1234 and + pkt[scapyall.TCP].dport == 5000 and + self.check_tcp_payload(pkt) and + self.no_flood(pkt) + ] + filtered_packets = filtered_packets + filtered_decap_packets + # Re-arrange packets, if delayed, by Payload ID and Timestamp: packets = sorted(filtered_packets, key = lambda packet: (int(str(packet[scapyall.TCP].payload)), packet.time )) self.lost_packets = dict() @@ -1209,8 +1274,8 @@ def examine_flow(self, filename = None): self.fails['dut'].add("Sniffer failed to filter any traffic from DUT") self.assertTrue(received_counter, "Sniffer failed to filter any traffic from DUT") self.fails['dut'].clear() + self.disrupts_count = len(self.lost_packets) # Total disrupt counter. if self.lost_packets: - self.disrupts_count = len(self.lost_packets) # Total disrupt counter. # Find the longest loss with the longest time: max_disrupt_from_id, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start, self.no_routing_stop) = \ max(self.lost_packets.items(), key = lambda item:item[1][0:2]) @@ -1219,6 +1284,10 @@ def examine_flow(self, filename = None): self.log("Disruptions happen between %s and %s after the reboot." 
% \ (str(self.disruption_start - self.reboot_start), str(self.disruption_stop - self.reboot_start))) else: + self.max_lost_id = 0 + self.max_disrupt_time = 0 + self.total_disrupt_packets = 0 + self.total_disrupt_time = 0 self.log("Gaps in forwarding not found.") self.log("Total incoming packets captured %d" % received_counter) if packets: @@ -1232,7 +1301,7 @@ def check_forwarding_stop(self): while True: state = self.asic_state.get() for _, q in self.ssh_jobs: - q.put('go') + self.put_nowait(q, 'check_stop') if state == 'down': break time.sleep(self.TIMEOUT) @@ -1251,6 +1320,7 @@ def check_forwarding_resume(self): return self.asic_state.get_state_time(state), self.get_asic_vlan_reachability() def ping_data_plane(self, light_probe=True): + self.dataplane.flush() replies_from_servers = self.pingFromServers() if replies_from_servers > 0 or not light_probe: replies_from_upper = self.pingFromUpperTier() @@ -1282,7 +1352,7 @@ def wait_dut_to_warm_up(self): up_time = datetime.datetime.now() up_secs = (datetime.datetime.now() - up_time).total_seconds() if up_secs > dut_stabilize_secs: - break; + break else: # reset up_time up_time = None @@ -1458,7 +1528,7 @@ def pingFromServers(self): for i in xrange(self.nr_pc_pkts): testutils.send_packet(self, self.from_server_src_port, self.from_vlan_packet) - total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.TIMEOUT) + total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_vlan_exp_packet, self.from_server_dst_ports, timeout=self.PKT_TOUT) self.log("Send %5d Received %5d servers->t1" % (self.nr_pc_pkts, total_rcv_pkt_cnt), True) @@ -1468,7 +1538,7 @@ def pingFromUpperTier(self): for entry in self.from_t1: testutils.send_packet(self, *entry) - total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.TIMEOUT) + total_rcv_pkt_cnt = 
testutils.count_matched_packets_all_ports(self, self.from_t1_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT) self.log("Send %5d Received %5d t1->servers" % (self.nr_vl_pkts, total_rcv_pkt_cnt), True) @@ -1478,7 +1548,7 @@ def pingDut(self): for i in xrange(self.ping_dut_pkts): testutils.send_packet(self, self.random_port(self.vlan_ports), self.ping_dut_packet) - total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.TIMEOUT) + total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.ping_dut_exp_packet, self.vlan_ports, timeout=self.PKT_TOUT) self.log("Send %5d Received %5d ping DUT" % (self.ping_dut_pkts, total_rcv_pkt_cnt), True) @@ -1487,6 +1557,6 @@ def pingDut(self): def arpPing(self): for i in xrange(self.arp_ping_pkts): testutils.send_packet(self, self.arp_src_port, self.arp_ping) - total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.TIMEOUT) + total_rcv_pkt_cnt = testutils.count_matched_packets_all_ports(self, self.arp_resp, [self.arp_src_port], timeout=self.PKT_TOUT) self.log("Send %5d Received %5d arp ping" % (self.arp_ping_pkts, total_rcv_pkt_cnt), True) return total_rcv_pkt_cnt diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py index 7bab31fff52..53885eb62f0 100644 --- a/ansible/roles/test/files/ptftests/arista.py +++ b/ansible/roles/test/files/ptftests/arista.py @@ -64,6 +64,9 @@ def connect(self): self.do_cmd('enable') self.do_cmd('terminal length 0') + version_output = self.do_cmd('show version') + self.veos_version = self.parse_version(version_output) + return self.shell def get_arista_prompt(self, first_prompt): @@ -124,12 +127,10 @@ def run(self): bgp_neig_output = self.do_cmd('show ip bgp neighbors') info['bgp_neig'] = self.parse_bgp_neighbor(bgp_neig_output) - bgp_route_v4_output = self.do_cmd('show ip route bgp | json') - v4_routing_ok = 
self.parse_bgp_route(bgp_route_v4_output, self.v4_routes) + v4_routing_ok, bgp_route_v4_output = self.check_bgp_route(self.v4_routes) info['bgp_route_v4'] = v4_routing_ok - bgp_route_v6_output = self.do_cmd("show ipv6 route bgp | json") - v6_routing_ok = self.parse_bgp_route(bgp_route_v6_output, self.v6_routes) + v6_routing_ok, bgp_route_v6_output = self.check_bgp_route(self.v6_routes, ipv6=True) info["bgp_route_v6"] = v6_routing_ok portchannel_output = self.do_cmd("show interfaces po1 | json") @@ -350,6 +351,18 @@ def parse_bgp_route(self, output, expects): return set(expects) == prefixes + def check_bgp_route(self, expects, ipv6=False): + cmd = 'show ip route {} | json' + if ipv6: + cmd = 'show ipv6 route {} | json' + + ok = True + for prefix in set(expects): + output = self.do_cmd(cmd.format(prefix)) + ok &= self.parse_bgp_route(output, [prefix]) + + return ok, output + def get_bgp_info(self): # Retreive BGP info (peer addr, AS) for the dut and neighbor neigh_bgp = {} @@ -373,7 +386,16 @@ def change_bgp_neigh_state(self, asn, is_up=True): state = ['shut', 'no shut'] self.do_cmd('configure') self.do_cmd('router bgp %s' % asn) - self.do_cmd('%s' % state[is_up]) + if self.veos_version < 4.20: + self.do_cmd('%s' % state[is_up]) + else: + if is_up == True: + self.do_cmd('%s' % state[is_up]) + else: + # shutdown BGP will pop confirm message, the message is + # "You are attempting to shutdown BGP. Are you sure you want to shutdown? 
[confirm]" + self.do_cmd('%s' % state[is_up], prompt = '[confirm]') + self.do_cmd('y') self.do_cmd('exit') self.do_cmd('exit') @@ -525,3 +547,10 @@ def check_change_time(self, output, entity, what): # Note: the first item is a placeholder return 0, change_count + + def parse_version(self, output): + version = 0 + for line in output.split('\n'): + if ('Software image version: ' in line): + version = float(re.search('([1-9]{1}\d*)(\.\d{0,2})', line).group()) + return version diff --git a/ansible/roles/test/files/ptftests/copp_tests.py b/ansible/roles/test/files/ptftests/copp_tests.py index cdbc6f4042b..68fd4a9c22c 100644 --- a/ansible/roles/test/files/ptftests/copp_tests.py +++ b/ansible/roles/test/files/ptftests/copp_tests.py @@ -6,6 +6,7 @@ # # ARPTest # DHCPTest +# DHCPTopoT1Test # LLDPTest # BGPTest # LACPTest @@ -257,6 +258,41 @@ def contruct_packet(self, port_number): return packet +# SONIC configuration has no packets to CPU for DHCP-T1 Topo +class DHCPTopoT1Test(PolicyTest): + def __init__(self): + PolicyTest.__init__(self) + # T1 DHCP no packet to packet to CPU so police rate is 0 + self.PPS_LIMIT_MIN = 0 + self.PPS_LIMIT_MAX = 0 + + def runTest(self): + self.log("DHCPTopoT1Test") + self.run_suite() + + def contruct_packet(self, port_number): + src_mac = self.my_mac[port_number] + packet = simple_udp_packet(pktlen=100, + eth_dst='ff:ff:ff:ff:ff:ff', + eth_src=src_mac, + dl_vlan_enable=False, + vlan_vid=0, + vlan_pcp=0, + dl_vlan_cfi=0, + ip_src='0.0.0.0', + ip_dst='255.255.255.255', + ip_tos=0, + ip_ttl=64, + udp_sport=68, + udp_dport=67, + ip_ihl=None, + ip_options=False, + with_udp_chksum=True + ) + + return packet + + # SONIC configuration has no policer limiting for DHCP class DHCPTest(NoPolicyTest): def __init__(self): diff --git a/ansible/roles/test/files/ptftests/device_connection.py b/ansible/roles/test/files/ptftests/device_connection.py index a29ea493b06..5a475eba671 100644 --- a/ansible/roles/test/files/ptftests/device_connection.py +++ 
b/ansible/roles/test/files/ptftests/device_connection.py @@ -1,5 +1,6 @@ import paramiko import logging +import socket from paramiko.ssh_exception import BadHostKeyException, AuthenticationException, SSHException logger = logging.getLogger(__name__) @@ -57,6 +58,13 @@ def execCommand(self, cmd, timeout=DEFAULT_CMD_EXECUTION_TIMEOUT_SEC): logger.error('SSH Authentiaction failure with message: %s' % authenticationException) except BadHostKeyException as badHostKeyException: logger.error('SSH Authentiaction failure with message: %s' % badHostKeyException) + except socket.timeout as e: + # The ssh session will timeout in case of a successful reboot + logger.error('Caught exception socket.timeout: {}, {}, {}'.format(repr(e), str(e), type(e))) + retValue = 255 + except Exception as e: + logger.error('Exception caught: {}, {}, type: {}'.format(repr(e), str(e), type(e))) + logger.error(sys.exc_info()) finally: client.close() diff --git a/ansible/roles/test/files/ptftests/dhcp_relay_test.py b/ansible/roles/test/files/ptftests/dhcp_relay_test.py index 2b315418372..1d1ca62066c 100644 --- a/ansible/roles/test/files/ptftests/dhcp_relay_test.py +++ b/ansible/roles/test/files/ptftests/dhcp_relay_test.py @@ -143,6 +143,9 @@ def setUp(self): self.client_ip = incrementIpAddress(self.relay_iface_ip, 1) self.client_subnet = self.test_params['relay_iface_netmask'] + self.dest_mac_address = self.test_params['dest_mac_address'] + self.client_udp_src_port = self.test_params['client_udp_src_port'] + def tearDown(self): DataplaneBaseTest.tearDown(self) @@ -153,8 +156,13 @@ def tearDown(self): """ - def create_dhcp_discover_packet(self): - return testutils.dhcp_discover_packet(eth_client=self.client_mac, set_broadcast_bit=True) + def create_dhcp_discover_packet(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT): + discover_packet = testutils.dhcp_discover_packet(eth_client=self.client_mac, set_broadcast_bit=True) + + discover_packet[scapy.Ether].dst = dst_mac + 
discover_packet[scapy.IP].sport = src_port + + return discover_packet def create_dhcp_discover_relayed_packet(self): my_chaddr = ''.join([chr(int(octet, 16)) for octet in self.client_mac.split(':')]) @@ -255,11 +263,18 @@ def create_dhcp_offer_relayed_packet(self): pkt = ether / ip / udp / bootp return pkt - def create_dhcp_request_packet(self): - return testutils.dhcp_request_packet(eth_client=self.client_mac, - ip_server=self.server_ip, - ip_requested=self.client_ip, - set_broadcast_bit=True) + def create_dhcp_request_packet(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT): + request_packet = testutils.dhcp_request_packet( + eth_client=self.client_mac, + ip_server=self.server_ip, + ip_requested=self.client_ip, + set_broadcast_bit=True + ) + + request_packet[scapy.Ether].dst = dst_mac + request_packet[scapy.IP].sport = src_port + + return request_packet def create_dhcp_request_relayed_packet(self): my_chaddr = ''.join([chr(int(octet, 16)) for octet in self.client_mac.split(':')]) @@ -363,9 +378,9 @@ def create_dhcp_ack_relayed_packet(self): """ # Simulate client coming on VLAN and broadcasting a DHCPDISCOVER message - def client_send_discover(self): + def client_send_discover(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT): # Form and send DHCPDISCOVER packet - dhcp_discover = self.create_dhcp_discover_packet() + dhcp_discover = self.create_dhcp_discover_packet(dst_mac, src_port) testutils.send_packet(self, self.client_port_index, dhcp_discover) # Verify that the DHCP relay actually received and relayed the DHCPDISCOVER message to all of @@ -446,8 +461,8 @@ def verify_offer_received(self): testutils.verify_packet(self, masked_offer, self.client_port_index) # Simulate our client sending a DHCPREQUEST message - def client_send_request(self): - dhcp_request = self.create_dhcp_request_packet() + def client_send_request(self, dst_mac=BROADCAST_MAC, src_port=DHCP_CLIENT_PORT): + dhcp_request = self.create_dhcp_request_packet(dst_mac, src_port) 
testutils.send_packet(self, self.client_port_index, dhcp_request) # Verify that the DHCP relay actually received and relayed the DHCPREQUEST message to all of @@ -522,11 +537,11 @@ def verify_ack_received(self): testutils.verify_packet(self, masked_ack, self.client_port_index) def runTest(self): - self.client_send_discover() + self.client_send_discover(self.dest_mac_address, self.client_udp_src_port) self.verify_relayed_discover() self.server_send_offer() self.verify_offer_received() - self.client_send_request() + self.client_send_request(self.dest_mac_address, self.client_udp_src_port) self.verify_relayed_request() self.server_send_ack() self.verify_ack_received() diff --git a/ansible/roles/test/files/ptftests/dir_bcast_test.py b/ansible/roles/test/files/ptftests/dir_bcast_test.py index daeb547af27..507ca81ce98 100644 --- a/ansible/roles/test/files/ptftests/dir_bcast_test.py +++ b/ansible/roles/test/files/ptftests/dir_bcast_test.py @@ -24,14 +24,14 @@ class BcastTest(BaseTest): ''' @summary: Overview of functionality - Test sends a directed broadcast packet on one of the non-VLAN RIF interface and destined to the + Test sends a directed broadcast packet on one of the non-VLAN RIF interface and destined to the broadcast IP of the VLAN RIF. 
It expects the packet to be broadcasted to all the member port of - VLAN - + VLAN + This class receives a text file containing the VLAN IP address/prefix and the member port list - For the device configured with VLAN interface and member ports, - - IP/UDP frame, UDP port - DHCP server port, Dst Mac = Router MAC, Dst IP = Directed Broadcast IP + For the device configured with VLAN interface and member ports, + - IP frame, Dst Mac = Router MAC, Dst IP = Directed Broadcast IP ''' #--------------------------------------------------------------------- @@ -39,6 +39,7 @@ class BcastTest(BaseTest): #--------------------------------------------------------------------- BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff' DHCP_SERVER_PORT = 67 + TEST_SRC_IP = "1.1.1.1" # Some src IP def __init__(self): ''' @@ -71,8 +72,10 @@ def setUpVlan(self, file_path): for line in f.readlines(): entry = line.split(' ', 1) prefix = ip_network(unicode(entry[0])) + if prefix.version != 4: + continue self._vlan_dict[prefix] = [int(i) for i in entry[1].split()] - + #--------------------------------------------------------------------- def check_all_dir_bcast(self): @@ -83,56 +86,94 @@ def check_all_dir_bcast(self): bcast_ip = str(ip_network(vlan_pfx).broadcast_address) dst_port_list = self._vlan_dict[vlan_pfx] self.check_ip_dir_bcast(bcast_ip, dst_port_list) + self.check_bootp_dir_bcast(bcast_ip, dst_port_list) #--------------------------------------------------------------------- def check_ip_dir_bcast(self, dst_bcast_ip, dst_port_list): ''' - @summary: Check unicast IP forwarding and receiving on all member ports. + @summary: Check directed broadcast IP forwarding and receiving on all member ports. 
''' - ip_src = "10.0.0.100" # Some src_ip - ip_dst = dst_bcast_ip + ip_src = self.TEST_SRC_IP + ip_dst = dst_bcast_ip src_mac = self.dataplane.get_mac(0, 0) bcast_mac = self.BROADCAST_MAC - udp_port = self.DHCP_SERVER_PORT - - pkt = simple_udp_packet(eth_dst=self.router_mac, - eth_src=src_mac, - ip_src=ip_src, - ip_dst=ip_dst, - udp_sport=udp_port, - udp_dport=udp_port) - exp_pkt = simple_udp_packet(eth_dst=bcast_mac, - eth_src=self.router_mac, - ip_src=ip_src, - ip_dst=ip_dst, - udp_sport=udp_port, - udp_dport=udp_port) + pkt = simple_ip_packet(eth_dst=self.router_mac, + eth_src=src_mac, + ip_src=ip_src, + ip_dst=ip_dst) + + exp_pkt = simple_ip_packet(eth_dst=bcast_mac, + eth_src=self.router_mac, + ip_src=ip_src, + ip_dst=ip_dst) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") - - src_port = random.choice([port for port in self.src_ports if port not in dst_port_list]) + + src_port = random.choice([port for port in self.src_ports if port not in dst_port_list]) send_packet(self, src_port, pkt) logging.info("Sending packet from port " + str(src_port) + " to " + ip_dst) pkt_count = count_matched_packets_all_ports(self, masked_exp_pkt, dst_port_list) - ''' + ''' Check if broadcast packet is received on all member ports of vlan ''' logging.info("Received " + str(pkt_count) + " broadcast packets, expecting " + str(len(dst_port_list))) assert (pkt_count == len(dst_port_list)) - + + return + + #--------------------------------------------------------------------- + + def check_bootp_dir_bcast(self, dst_bcast_ip, dst_port_list): + ''' + @summary: Check directed broadcast BOOTP packet forwarding and receiving on all member ports. 
+ ''' + ip_src = self.TEST_SRC_IP + ip_dst = dst_bcast_ip + src_mac = self.dataplane.get_mac(0, 0) + bcast_mac = self.BROADCAST_MAC + udp_port = self.DHCP_SERVER_PORT + + pkt = simple_udp_packet(eth_dst=self.router_mac, + eth_src=src_mac, + ip_src=ip_src, + ip_dst=ip_dst, + udp_sport=udp_port, + udp_dport=udp_port) + + exp_pkt = simple_udp_packet(eth_dst=bcast_mac, + eth_src=self.router_mac, + ip_src=ip_src, + ip_dst=ip_dst, + udp_sport=udp_port, + udp_dport=udp_port) + + masked_exp_pkt = Mask(exp_pkt) + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum") + masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl") + + src_port = random.choice([port for port in self.src_ports if port not in dst_port_list]) + send_packet(self, src_port, pkt) + logging.info("Sending BOOTP packet from port " + str(src_port) + " to " + ip_dst) + + pkt_count = count_matched_packets_all_ports(self, masked_exp_pkt, dst_port_list) + ''' + Check if broadcast BOOTP packet is received on all member ports of vlan + ''' + logging.info("Received " + str(pkt_count) + " broadcast BOOTP packets, expecting " + str(len(dst_port_list))) + assert (pkt_count == len(dst_port_list)) + return #--------------------------------------------------------------------- def runTest(self): """ - @summary: Send Broadcast IP packet destined to a VLAN RIF and with unicast Dst MAC + @summary: Send Broadcast IP packet destined to a VLAN RIF and with unicast Dst MAC Expect the packet to be received on all member ports of VLAN """ self.check_all_dir_bcast() - diff --git a/ansible/roles/test/files/ptftests/fdb_test.py b/ansible/roles/test/files/ptftests/fdb_test.py index 26f57370f4a..8b9f9581758 100644 --- a/ansible/roles/test/files/ptftests/fdb_test.py +++ b/ansible/roles/test/files/ptftests/fdb_test.py @@ -1,20 +1,16 @@ -import fdb -import json -import logging +import time import subprocess +import logging -from collections import defaultdict -from ipaddress import ip_address, ip_network - +from ipaddress import 
ip_address import ptf -import ptf.packet as scapy -import ptf.dataplane as dataplane - -from ptf import config from ptf.base_tests import BaseTest from ptf.testutils import * +import fdb + class FdbTest(BaseTest): + def __init__(self): BaseTest.__init__(self) self.test_params = test_params_get() @@ -36,16 +32,11 @@ def setUp(self): self.dataplane = ptf.dataplane_instance self.fdb = fdb.Fdb(self.test_params['fdb_info']) self.vlan_ip = ip_address(unicode(self.test_params['vlan_ip'])) + self.dummy_mac_prefix = self.test_params["dummy_mac_prefix"] + self.dummy_mac_number = int(self.test_params["dummy_mac_number"]) + self.dummy_mac_table = {} self.setUpFdb() - - self.log("Start arp_responder") - self.shell(["supervisorctl", "start", "arp_responder"]) - #-------------------------------------------------------------------------- - - def tearDown(self): - self.log("Stop arp_responder") - self.shell(["supervisorctl", "stop", "arp_responder"]) #-------------------------------------------------------------------------- def setUpFdb(self): @@ -55,30 +46,27 @@ def setUpFdb(self): mac = self.dataplane.get_mac(0, member) self.fdb.insert(mac, member) - # Send a packet to switch to populate the layer 2 table + # Send a packet to switch to populate the layer 2 table with MAC of PTF interface pkt = simple_eth_packet(eth_dst=self.test_params['router_mac'], eth_src=mac, eth_type=0x1234) send(self, member, pkt) - #-------------------------------------------------------------------------- - def setUpArpResponder(self): - vlan_table = self.fdb.get_vlan_table() - arp_table = self.fdb.get_arp_table() - d = defaultdict(list) - for vlan in vlan_table: - network = ip_network(vlan) - length = int(network[-1]) - int(network[0]) - index = 1 - for member in vlan_table[vlan]: - iface = "eth%d" % member - index = index + 1 if network[index + 1] != self.vlan_ip else index + 2 - d[iface].append(str(network[index])) - with open('/tmp/from_t1.json', 'w') as file: - json.dump(d, file) + # Send packets to 
switch to populate the layer 2 table with dummy MACs for each port + # Totally 10 dummy MACs for each port, send 1 packet for each dummy MAC + dummy_macs = [self.dummy_mac_prefix + ":{:02x}:{:02x}".format(member, i) + for i in range(self.dummy_mac_number)] + self.dummy_mac_table[member] = dummy_macs + for dummy_mac in dummy_macs: + pkt = simple_eth_packet(eth_dst=self.test_params['router_mac'], + eth_src=dummy_mac, + eth_type=0x1234) + send(self, member, pkt) + + time.sleep(2) #-------------------------------------------------------------------------- - def check_route(self, src_mac, dst_mac, src_port, dst_port): + def test_l2_forwarding(self, src_mac, dst_mac, src_port, dst_port): pkt = simple_eth_packet(eth_dst=dst_mac, eth_src=src_mac, eth_type=0x1234) @@ -93,5 +81,57 @@ def runTest(self): for vlan in vlan_table: for src in vlan_table[vlan]: for dst in [i for i in vlan_table[vlan] if i != src]: - self.check_route(arp_table[src], arp_table[dst], src, dst) + self.test_l2_forwarding(arp_table[src], arp_table[dst], src, dst) + + for dummy_mac in self.dummy_mac_table[dst]: + self.test_l2_forwarding(arp_table[src], dummy_mac, src, dst) + #-------------------------------------------------------------------------- + + +class FdbConfigReloadTest(BaseTest): + + def __init__(self): + BaseTest.__init__(self) + self.test_params = test_params_get() + #-------------------------------------------------------------------------- + + def setUp(self): + self.dataplane = ptf.dataplane_instance + self.router_mac = self.test_params["router_mac"] + self.vlan_ports = self.test_params["vlan_ports"].split() + self.lag_ports = self.test_params["lag_ports"].split() + self.dummy_mac_prefix = self.test_params["dummy_mac_prefix"] + self.vlan_port_idx = 0 + self.lag_port_idx = 0 + #-------------------------------------------------------------------------- + + def runTest(self): + max_time = 300 # seconds + i = 0 + start_time = time.time() + while i < 0xffff - 128 and time.time() - start_time < 
max_time: + # Send frame to vlan and lag port in interleaved way + if i % 2 == 0: + port = self.vlan_ports[self.vlan_port_idx] + self.vlan_port_idx += 1 + if self.vlan_port_idx >= len(self.vlan_ports): + self.vlan_port_idx = 0 + else: + port = self.lag_ports[self.lag_port_idx] + self.lag_port_idx += 1 + if self.lag_port_idx >= len(self.lag_ports): + self.lag_port_idx = 0 + + field_0 = i % 256 + field_1 = i / 256 + dummy_mac = self.dummy_mac_prefix + ":{:02x}:{:02x}".format(field_1, field_0) + pkt = simple_eth_packet(eth_dst=self.router_mac, + eth_src=dummy_mac, + eth_type=0x1234) + logging.debug("Send packet to port %s, src mac: %s" % (str(port), dummy_mac)) + send(self, port, pkt) + i += 1 + + if i % len(self.lag_ports)*2 == 0: + time.sleep(0.1) #-------------------------------------------------------------------------- diff --git a/ansible/roles/test/files/ptftests/fib.py b/ansible/roles/test/files/ptftests/fib.py index 25af9f6a214..5686bf516c0 100644 --- a/ansible/roles/test/files/ptftests/fib.py +++ b/ansible/roles/test/files/ptftests/fib.py @@ -17,9 +17,9 @@ ] EXCLUDE_IPV6_PREFIXES = [ - '::/0', # Currently no IPv6 default route '::/128', # Unspecified RFC 4291 '::1/128', # Loopback RFC 4291 + 'fe80::/10', # Link local RFC 4291 'ff00::/8' # Multicast RFC 4291 ] @@ -72,6 +72,13 @@ def __getitem__(self, ip): elif ip.version is 6: return self._ipv6_lpm_dict[str(ip)] + def __contains__(self, ip): + ip_obj = ip_address(unicode(ip)) + if ip_obj.version == 4: + return self._ipv4_lpm_dict.contains(ip) + elif ip_obj.version == 6: + return self._ipv6_lpm_dict.contains(ip) + def ipv4_ranges(self): return self._ipv4_lpm_dict.ranges() diff --git a/ansible/roles/test/files/ptftests/fib_test.py b/ansible/roles/test/files/ptftests/fib_test.py index 89c95498005..1ed3ba477a4 100644 --- a/ansible/roles/test/files/ptftests/fib_test.py +++ b/ansible/roles/test/files/ptftests/fib_test.py @@ -10,11 +10,8 @@ #--------------------------------------------------------------------- # 
Global imports #--------------------------------------------------------------------- -import ipaddress import logging import random -import socket -import sys import ptf import ptf.packet as scapy @@ -61,6 +58,13 @@ class FibTest(BaseTest): DEFAULT_BALANCING_RANGE = 0.25 BALANCING_TEST_TIMES = 10000 DEFAULT_BALANCING_TEST_RATIO = 0.0001 + ACTION_FWD = 'fwd' + ACTION_DROP = 'drop' + + _required_params = [ + 'fib_info', + 'router_mac', + ] def __init__(self): ''' @@ -68,91 +72,130 @@ def __init__(self): ''' BaseTest.__init__(self) self.test_params = test_params_get() + self.check_required_params() #--------------------------------------------------------------------- def setUp(self): ''' @summary: Setup for the test - Two test parameters are used: + Some test parameters are used: - fib_info: the FIB information generated according to the testbed - router_mac: the MAC address of the DUT used to create the eth_dst of the packet - testbed_type: the type of the testbed used to determine the source port - - src_port: this list should include all enabled ports, both up links + - src_ports: this list should include all enabled ports, both up links and down links. + - pkt_action: expect to receive test traffic or not. Default: fwd + - ipv4/ipv6: enable ipv4/ipv6 tests + + Other test parameters: + - ttl: ttl of test pkts. Auto decrease 1 for expected pkts. + - ip_options enable ip option header in ipv4 pkts. Default: False(disable) + - src_vid vlan tag id of src pkts. Default: None(untag) + - dst_vid vlan tag id of dst pkts. 
Default: None(untag) + TODO: Have a separate line in fib_info/file to indicate all UP ports ''' self.dataplane = ptf.dataplane_instance - self.fib = fib.Fib(self.test_params['fib_info']) - self.router_mac = self.test_params['router_mac'] - self.pktlen = self.test_params['testbed_mtu'] + fib_info = self.test_params.get('fib_info', None) + self.fib = fib.Fib(self.test_params['fib_info']) if fib_info is not None else None + self.router_mac = self.test_params.get('router_mac', None) + self.pktlen = self.test_params.get('testbed_mtu', 1500) self.test_ipv4 = self.test_params.get('ipv4', True) self.test_ipv6 = self.test_params.get('ipv6', True) + self.test_balancing = self.test_params.get('test_balancing', True) self.balancing_range = self.test_params.get('balancing_range', self.DEFAULT_BALANCING_RANGE) + self.balancing_test_times = self.test_params.get('balancing_test_times', self.BALANCING_TEST_TIMES) self.balancing_test_ratio = self.test_params.get('balancing_test_ratio', self.DEFAULT_BALANCING_TEST_RATIO) - if self.test_params['testbed_type'] == 't1' or self.test_params['testbed_type'] == 't1-lag': - self.src_ports = range(0, 32) - if self.test_params['testbed_type'] == 't1-64-lag' or self.test_params['testbed_type'] == 't1-64-lag-clet': - self.src_ports = [0, 1, 4, 5, 16, 17, 20, 21, 34, 36, 37, 38, 39, 42, 44, 45, 46, 47, 50, 52, 53, 54, 55, 58, 60, 61, 62, 63] - if self.test_params['testbed_type'] == 't0': - self.src_ports = range(1, 25) + range(28, 32) - if self.test_params['testbed_type'] == 't0-52': - self.src_ports = range(0, 52) - if self.test_params['testbed_type'] == 't0-56': - self.src_ports = [0, 1, 4, 5, 8, 9] + range(12, 18) + [20, 21, 24, 25, 28, 29, 32, 33, 36, 37] + range(40, 46) + [48, 49, 52, 53] - if self.test_params['testbed_type'] == 't0-64': - self.src_ports = range(0, 2) + range(4, 18) + range(20, 33) + range(36, 43) + range(48, 49) + range(52, 59) - if self.test_params['testbed_type'] == 't0-116': - self.src_ports = range(0, 120) + 
self.pkt_action = self.test_params.get('pkt_action', self.ACTION_FWD) + self.ttl = self.test_params.get('ttl', 64) + self.ip_options = self.test_params.get('ip_options', False) + self.src_vid = self.test_params.get('src_vid', None) + self.dst_vid = self.test_params.get('dst_vid', None) + + self.src_ports = self.test_params.get('src_ports', None) + if self.src_ports is None: + # Provide the list of all UP interfaces with index in sequence order starting from 0 + if self.test_params['testbed_type'] == 't1' or self.test_params['testbed_type'] == 't1-lag' or self.test_params['testbed_type'] == 't0-64-32': + self.src_ports = range(0, 32) + if self.test_params['testbed_type'] == 't1-64-lag' or self.test_params['testbed_type'] == 't1-64-lag-clet': + self.src_ports = [0, 1, 4, 5, 16, 17, 20, 21, 34, 36, 37, 38, 39, 42, 44, 45, 46, 47, 50, 52, 53, 54, 55, 58, 60, 61, 62, 63] + if self.test_params['testbed_type'] == 't0': + self.src_ports = range(1, 25) + range(28, 32) + if self.test_params['testbed_type'] == 't0-52': + self.src_ports = range(0, 52) + if self.test_params['testbed_type'] == 't0-56': + self.src_ports = [0, 1, 4, 5, 8, 9] + range(12, 18) + [20, 21, 24, 25, 28, 29, 32, 33, 36, 37] + range(40, 46) + [48, 49, 52, 53] + if self.test_params['testbed_type'] == 't0-64': + self.src_ports = range(0, 2) + range(4, 18) + range(20, 33) + range(36, 43) + range(48, 49) + range(52, 59) + if self.test_params['testbed_type'] == 't0-116': + self.src_ports = range(0, 120) #--------------------------------------------------------------------- - def check_ip_range(self, ipv4=True): + def check_required_params(self): + for param in self._required_params: + if param not in self.test_params: + raise Exception("Missing required parameter {}".format(param)) + + def check_ip_ranges(self, ipv4=True): if ipv4: ip_ranges = self.fib.ipv4_ranges() else: ip_ranges = self.fib.ipv6_ranges() for ip_range in ip_ranges: - - # Get the expected list of ports that would receive the packets - 
exp_port_list = self.fib[ip_range.get_first_ip()].get_next_hop_list() - # Choose random one source port from all ports excluding the expected ones - src_port = random.choice([port for port in self.src_ports if port not in exp_port_list]) - - if not exp_port_list: - continue - - logging.info("Check IP range:" + str(ip_range) + " on " + str(exp_port_list) + "...") - - # Send a packet with the first IP in the range - self.check_ip_route(src_port, ip_range.get_first_ip(), exp_port_list, ipv4) - # Send a packet with the last IP in the range - if ip_range.length() > 1: - self.check_ip_route(src_port, ip_range.get_last_ip(), exp_port_list, ipv4) - # Send a packet with a random IP in the range - if ip_range.length() > 2: - self.check_ip_route(src_port, ip_range.get_random_ip(), exp_port_list, ipv4) - - # Test traffic balancing across ECMP/LAG members - if len(exp_port_list) > 1 and random.random() < self.balancing_test_ratio: - logging.info("Check IP range balancing...") - dst_ip = ip_range.get_random_ip() - hit_count_map = {} - for i in range(0, self.BALANCING_TEST_TIMES): - (matched_index, received) = self.check_ip_route(src_port, dst_ip, exp_port_list, ipv4) - hit_count_map[matched_index] = hit_count_map.get(matched_index, 0) + 1 - self.check_balancing(self.fib[dst_ip].get_next_hop(), hit_count_map) + if ip_range.get_first_ip() in self.fib: + next_hop = self.fib[ip_range.get_first_ip()] + self.check_ip_range(ip_range, next_hop, ipv4) + + def check_ip_range(self, ip_range, next_hop, ipv4=True): + # Get the expected list of ports that would receive the packets + exp_port_list = next_hop.get_next_hop_list() + # Choose random one source port from all ports excluding the expected ones + src_port = random.choice([port for port in self.src_ports if port not in exp_port_list]) + + if not exp_port_list: + logging.info("Skip check IP range {} with nexthop {}".format(ip_range, next_hop)) + return + + logging.info("Check IP range:" + str(ip_range) + " on " + str(exp_port_list) + 
"...") + + # Send a packet with the first IP in the range + self.check_ip_route(src_port, ip_range.get_first_ip(), exp_port_list, ipv4) + # Send a packet with the last IP in the range + if ip_range.length() > 1: + self.check_ip_route(src_port, ip_range.get_last_ip(), exp_port_list, ipv4) + # Send a packet with a random IP in the range + if ip_range.length() > 2: + self.check_ip_route(src_port, ip_range.get_random_ip(), exp_port_list, ipv4) + + # Test traffic balancing across ECMP/LAG members + if (self.test_balancing and self.pkt_action == self.ACTION_FWD + and len(exp_port_list) > 1 + and random.random() < self.balancing_test_ratio): + logging.info("Check IP range balancing...") + dst_ip = ip_range.get_random_ip() + hit_count_map = {} + for i in range(0, self.balancing_test_times): + (matched_index, received) = self.check_ip_route(src_port, dst_ip, exp_port_list, ipv4) + hit_count_map[matched_index] = hit_count_map.get(matched_index, 0) + 1 + self.check_balancing(next_hop.get_next_hop(), hit_count_map) def check_ip_route(self, src_port, dst_ip_addr, dst_port_list, ipv4=True): if ipv4: - (matched_index, received) = self.check_ipv4_route(src_port, dst_ip_addr, dst_port_list) + res = self.check_ipv4_route(src_port, dst_ip_addr, dst_port_list) else: - (matched_index, received) = self.check_ipv6_route(src_port, dst_ip_addr, dst_port_list) + res = self.check_ipv6_route(src_port, dst_ip_addr, dst_port_list) + + if self.pkt_action == self.ACTION_DROP: + return res + + (matched_index, received) = res assert received @@ -182,7 +225,10 @@ def check_ipv4_route(self, src_port, dst_ip_addr, dst_port_list): ip_dst=ip_dst, tcp_sport=sport, tcp_dport=dport, - ip_ttl=64) + ip_ttl=self.ttl, + ip_options=self.ip_options, + dl_vlan_enable=self.src_vid is not None, + vlan_vid=self.src_vid or 0) exp_pkt = simple_tcp_packet( self.pktlen, eth_src=self.router_mac, @@ -190,14 +236,20 @@ def check_ipv4_route(self, src_port, dst_ip_addr, dst_port_list): ip_dst=ip_dst, tcp_sport=sport, 
tcp_dport=dport, - ip_ttl=63) + ip_ttl=max(self.ttl-1, 0), + ip_options=self.ip_options, + dl_vlan_enable=self.dst_vid is not None, + vlan_vid=self.dst_vid or 0) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst") send_packet(self, src_port, pkt) logging.info("Sending packet from port " + str(src_port) + " to " + ip_dst) - return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + if self.pkt_action == self.ACTION_FWD: + return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + elif self.pkt_action == self.ACTION_DROP: + return verify_no_packet_any(self, masked_exp_pkt, dst_port_list) #--------------------------------------------------------------------- def check_ipv6_route(self, src_port, dst_ip_addr, dst_port_list): @@ -222,7 +274,9 @@ def check_ipv6_route(self, src_port, dst_ip_addr, dst_port_list): ipv6_src=ip_src, tcp_sport=sport, tcp_dport=dport, - ipv6_hlim=64) + ipv6_hlim=self.ttl, + dl_vlan_enable=self.src_vid is not None, + vlan_vid=self.src_vid or 0) exp_pkt = simple_tcpv6_packet( pktlen=self.pktlen, eth_src=self.router_mac, @@ -230,14 +284,19 @@ def check_ipv6_route(self, src_port, dst_ip_addr, dst_port_list): ipv6_src=ip_src, tcp_sport=sport, tcp_dport=dport, - ipv6_hlim=63) + ipv6_hlim=max(self.ttl-1, 0), + dl_vlan_enable=self.dst_vid is not None, + vlan_vid=self.dst_vid or 0) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(scapy.Ether,"dst") send_packet(self, src_port, pkt) logging.info("Sending packet from port " + str(src_port) + " to " + ip_dst) - return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + if self.pkt_action == self.ACTION_FWD: + return verify_packet_any_port(self, masked_exp_pkt, dst_port_list) + elif self.pkt_action == self.ACTION_DROP: + return verify_no_packet_any(self, masked_exp_pkt, dst_port_list) #--------------------------------------------------------------------- def check_within_expected_range(self, actual, expected): ''' @@ -289,7 
+348,7 @@ def runTest(self): """ # IPv4 Test if (self.test_ipv4): - self.check_ip_range() + self.check_ip_ranges() # IPv6 Test if (self.test_ipv6): - self.check_ip_range(ipv4=False) + self.check_ip_ranges(ipv4=False) diff --git a/ansible/roles/test/files/ptftests/lpm.py b/ansible/roles/test/files/ptftests/lpm.py index f30fdcef4e9..609613d4eea 100644 --- a/ansible/roles/test/files/ptftests/lpm.py +++ b/ansible/roles/test/files/ptftests/lpm.py @@ -103,3 +103,6 @@ def ranges(self): interval = self.IpInterval(sorted_boundaries[index], ip_address(u'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff')) ranges.append(interval) return ranges + + def contains(self, key): + return key in self._subnet_tree diff --git a/ansible/roles/test/files/ptftests/mtu_test.py b/ansible/roles/test/files/ptftests/mtu_test.py index 60d7c595ab3..4645980424f 100644 --- a/ansible/roles/test/files/ptftests/mtu_test.py +++ b/ansible/roles/test/files/ptftests/mtu_test.py @@ -27,14 +27,13 @@ class MtuTest(BaseTest): back. It also sends a jumbo frame to a route destination for verifying the forwarding functionality - For the device configured with IP-MTU=9100, PHY-MTU=9114, + By default.For the device configured with IP-MTU=9100, PHY-MTU=9114, - ICMP/IP frame, the packet-len is 9114 (This includes the 14 bytes Layer 2 Ethernet header) ''' #--------------------------------------------------------------------- # Class variables #--------------------------------------------------------------------- - DEFAULT_PACKET_LEN = 9114 ICMP_HDR_LEN = 8 def __init__(self): @@ -50,6 +49,7 @@ def setUp(self): self.dataplane = ptf.dataplane_instance self.router_mac = self.test_params['router_mac'] self.testbed_type = self.test_params['testbed_type'] + self.testbed_mtu = self.test_params['testbed_mtu'] #--------------------------------------------------------------------- @@ -60,8 +60,7 @@ def check_icmp_mtu(self): ip_src = "10.0.0.1" ip_dst = "10.0.0.0" src_mac = self.dataplane.get_mac(0, 0) - - pktlen = 
self.DEFAULT_PACKET_LEN + pktlen = self.pktlen pkt = simple_icmp_packet(pktlen=pktlen, eth_dst=self.router_mac, @@ -107,14 +106,14 @@ def check_ip_mtu(self): ip_dst = "10.0.0.63" src_mac = self.dataplane.get_mac(0, 0) - pkt = simple_ip_packet(pktlen=self.DEFAULT_PACKET_LEN, + pkt = simple_ip_packet(pktlen=self.pktlen, eth_dst=self.router_mac, eth_src=src_mac, ip_src=ip_src, ip_dst=ip_dst, ip_ttl=64) - exp_pkt = simple_ip_packet(pktlen=self.DEFAULT_PACKET_LEN, + exp_pkt = simple_ip_packet(pktlen=self.pktlen, eth_src=self.router_mac, ip_src=ip_src, ip_dst=ip_dst, @@ -148,5 +147,6 @@ def runTest(self): @summary: Send packet(Max MTU) to test on Ping request/response and unicast IP destination. Expect the packet to be received from one of the expected ports """ + self.pktlen = self.testbed_mtu self.check_icmp_mtu() self.check_ip_mtu() diff --git a/ansible/roles/test/files/ptftests/pfc_wd.py b/ansible/roles/test/files/ptftests/pfc_wd.py index 3389ae4e0d8..4c8d02837dd 100644 --- a/ansible/roles/test/files/ptftests/pfc_wd.py +++ b/ansible/roles/test/files/ptftests/pfc_wd.py @@ -53,7 +53,10 @@ def runTest(self): dport = random.randint(0, 65535) ip_src = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))) ip_src =ipaddress.IPv4Address(unicode(ip_src,'utf-8')) - while ip_src == ipaddress.IPv4Address(unicode(self.ip_dst,'utf-8')) or ip_src.is_multicast or ip_src.is_private or ip_src.is_global or ip_src.is_reserved: + if not isinstance(self.ip_dst, unicode): + self.ip_dst = unicode(self.ip_dst, 'utf-8') + ip_dst = ipaddress.IPv4Address(self.ip_dst) + while ip_src == ip_dst or ip_src.is_multicast or ip_src.is_private or ip_src.is_global or ip_src.is_reserved: ip_src = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff))) ip_src =ipaddress.IPv4Address(unicode(ip_src,'utf-8')) diff --git a/ansible/roles/test/files/ptftests/sad_path.py b/ansible/roles/test/files/ptftests/sad_path.py index 85e61d20e5a..8093e1c6193 100644 --- 
a/ansible/roles/test/files/ptftests/sad_path.py +++ b/ansible/roles/test/files/ptftests/sad_path.py @@ -320,9 +320,9 @@ def build_route_config(self): def get_bgp_route_cnt(self, is_up=True, v4=True): # extract the neigh ip and current number of routes if v4: - cmd = 'show ip bgp summary | sed \'1,/Neighbor/d;/^$/,$d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10' + cmd = 'show ip bgp summary | sed \'1,/Neighbor/d;/^$/,$d;/^-/d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10' else: - cmd = 'show ipv6 bgp summary | sed \'1,/Neighbor/d;/^$/,$d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10' + cmd = 'show ipv6 bgp summary | sed \'1,/Neighbor/d;/^$/,$d;/^-/d\' | sed \'s/\s\s*/ /g\' | cut -d\' \' -f 1,10' stdout, stderr, return_code = self.dut_connection.execCommand(cmd) if return_code != 0: @@ -431,7 +431,7 @@ def verify_bgp_dut_state(self, state='Idle'): if key not in ['v4', 'v6']: continue self.log.append('Verifying if the DUT side BGP peer %s is %s' % (self.neigh_bgps[vm][key], states)) - stdout, stderr, return_code = self.dut_connection.execCommand('show ip bgp neighbor %s' % self.neigh_bgps[vm][key]) + stdout, stderr, return_code = self.dut_connection.execCommand('show ip bgp neighbors %s' % self.neigh_bgps[vm][key]) if return_code == 0: for line in stdout: if 'BGP state' in line: diff --git a/ansible/roles/test/files/ptftests/vxlan-decap.py b/ansible/roles/test/files/ptftests/vxlan-decap.py index 4a86e274322..e001ce215bb 100644 --- a/ansible/roles/test/files/ptftests/vxlan-decap.py +++ b/ansible/roles/test/files/ptftests/vxlan-decap.py @@ -1,4 +1,4 @@ -# ptf -t "config_file='/tmp/vxlan_decap.json';vxlan_enabled=True" --platform-dir ptftests --test-dir ptftests --platform remote vxlan-decap +# ptf -t "config_file='/tmp/vxlan_decap.json';vxlan_enabled=True;dut_host=10.0.0.1;sonic_admin_user=admin;sonic_admin_password=admin" --platform-dir ptftests --test-dir ptftests --platform remote vxlan-decap # The test checks vxlan decapsulation for the dataplane. 
# The test runs three tests for each vlan on the DUT: @@ -6,10 +6,13 @@ # 2. 'RegularLAGtoVLAN' : Sends regular packets to PortChannel interfaces and expects to see the packets on the corresponding vlan interface. # 3. 'RegularVLANtoLAG' : Sends regular packets to Vlan member interfaces and expects to see the packets on the one of PortChannel interfaces. # -# The test has two parameters: +# The test has 6 parameters: # 1. 'config_file' is a filename of a file which contains all necessary information to run the test. The file is populated by ansible. This parameter is mandatory. # 2. 'vxlan_enabled' is a boolean parameter. When the parameter is true the test will fail if vxlan test failing. When the parameter is false the test will not fail. By default this parameter is false. # 3. 'count' is an integer parameter. It defines how many packets are sent for each combination of ingress/egress interfaces. By default the parameter equal to 1 +# 4. 'dut_host' is the ip address of dut. +# 5. 'sonic_admin_user': User name to login dut +# 6. 'sonic_admin_password': Password for sonic_admin_user to login dut import sys import os.path @@ -22,6 +25,7 @@ from ptf.testutils import * from ptf.dataplane import match_exp_pkt from ptf.mask import Mask +from ptf.testutils import dp_poll import datetime import subprocess import traceback @@ -29,6 +33,56 @@ import struct from pprint import pprint from pprint import pformat +from device_connection import DeviceConnection +import re + +def count_matched_packets_helper(test, exp_packet, exp_packet_number, port, device_number=0, timeout=1): + """ + Add exp_packet_number to original ptf interface in order to + stop waiting when expected number of packets is received + """ + if timeout <= 0: + raise Exception("%s() requires positive timeout value." 
% sys._getframe().f_code.co_name) + + total_rcv_pkt_cnt = 0 + while True: + result = dp_poll(test, device_number=device_number, port_number=port, timeout=timeout) + if isinstance(result, test.dataplane.PollSuccess): + if ptf.dataplane.match_exp_pkt(exp_packet, result.packet): + total_rcv_pkt_cnt += 1 + if total_rcv_pkt_cnt == exp_packet_number: + break + else: + break + + return total_rcv_pkt_cnt + +def count_matched_packets_all_ports_helper(test, exp_packet, exp_packet_number, ports=[], device_number=0, timeout=1): + """ + Add exp_packet_number to original ptf interface in order to + stop waiting when expected number of packets is received + """ + if timeout <= 0: + raise Exception("%s() requires positive timeout value." % sys._getframe().f_code.co_name) + + last_matched_packet_time = time.time() + total_rcv_pkt_cnt = 0 + while True: + if (time.time() - last_matched_packet_time) > timeout: + break + + result = dp_poll(test, device_number=device_number, timeout=timeout) + if isinstance(result, test.dataplane.PollSuccess): + if (result.port in ports and + ptf.dataplane.match_exp_pkt(exp_packet, result.packet)): + total_rcv_pkt_cnt += 1 + if total_rcv_pkt_cnt == exp_packet_number: + break + last_matched_packet_time = time.time() + else: + break + + return total_rcv_pkt_cnt class Vxlan(BaseTest): def __init__(self): @@ -51,8 +105,9 @@ def cmd(self, cmds): def readMacs(self): addrs = {} for intf in os.listdir('/sys/class/net'): - with open('/sys/class/net/%s/address' % intf) as fp: - addrs[intf] = fp.read().strip() + if os.path.isdir('/sys/class/net/%s' % intf): + with open('/sys/class/net/%s/address' % intf) as fp: + addrs[intf] = fp.read().strip() return addrs @@ -105,9 +160,20 @@ def setUp(self): if 'config_file' not in self.test_params: raise Exception("required parameter 'config_file' is not present") - config = self.test_params['config_file'] + if 'dut_host' not in self.test_params: + raise Exception("required parameter 'dut_host' is not present") + self.dut_host 
= self.test_params['dut_host'] + + if 'sonic_admin_user' not in self.test_params: + raise Exception("required parameter 'sonic_admin_user' is not present") + self.sonic_admin_user = self.test_params['sonic_admin_user'] + + if 'sonic_admin_password' not in self.test_params: + raise Exception("required parameter 'sonic_admin_password' is not present") + self.sonic_admin_password = self.test_params['sonic_admin_password'] + if not os.path.isfile(config): raise Exception("the config file %s doesn't exist" % config) @@ -135,6 +201,7 @@ def setUp(self): for name, data in graph['minigraph_vlans'].items(): test = {} test['name'] = name + test['intf_alias'] = data['members'] test['acc_ports'] = [graph['minigraph_port_indices'][member] for member in data['members']] vlan_id = int(name.replace('Vlan', '')) test['vni'] = vni_base + vlan_id @@ -172,11 +239,59 @@ def setUp(self): self.generate_ArpResponderConfig() self.cmd(["supervisorctl", "restart", "arp_responder"]) - + #Wait a short time for asp_reponder to be ready + time.sleep(10) self.dataplane.flush() + self.dut_connection = DeviceConnection( + self.dut_host, + self.sonic_admin_user, + password=self.sonic_admin_password + ) return + def check_arp_table_on_dut(self, test): + COMMAND = 'show arp' + stdout, stderr, return_code = self.dut_connection.execCommand(COMMAND) + for idx, port in enumerate(test['acc_ports']): + intf_alias = test['intf_alias'][idx] + ip_prefix = test['vlan_ip_prefixes'][port] + for line in stdout: + if re.match(r"{}.*{}.*".format(ip_prefix, intf_alias), line, re.IGNORECASE): + break + else: + return False + return True + + def check_fdb_on_dut(self, test): + COMMAND = 'fdbshow' + stdout, stderr, return_code = self.dut_connection.execCommand(COMMAND) + for idx, port in enumerate(test['acc_ports']): + mac_addr = self.ptf_mac_addrs['eth%d' % port] + intf_alias = test['intf_alias'][idx] + for line in stdout: + if re.match(r".*{}.*{}.*".format(mac_addr, intf_alias), line, re.IGNORECASE): + break + else: + 
return False + return True + + def wait_dut(self, test, timeout): + t = 0 + while t < timeout: + if self.check_fdb_on_dut(test): + break; + t += 1 + if t >= timeout: + return False + while t < timeout: + if self.check_arp_table_on_dut(test): + break; + t += 1 + if t >= timeout: + return False + return True + def tearDown(self): self.cmd(["supervisorctl", "stop", "arp_responder"]) return @@ -186,12 +301,13 @@ def warmup(self): err = '' trace = '' ret = 0 + TIMEOUT = 60 try: for test in self.tests: - if self.vxlan_enabled: - self.Vxlan(test, True) self.RegularLAGtoVLAN(test, True) - self.RegularVLANtoLAG(test, True) + #wait sometime for DUT to build FDB and ARP table + res = self.wait_dut(test, TIMEOUT) + self.assertTrue(res, "DUT is not ready after {} seconds".format(TIMEOUT)) except Exception as e: err = str(e) @@ -217,19 +333,21 @@ def work_test(self): try: for test in self.tests: print test['name'] - res_v, out_v = self.Vxlan(test) - print " Vxlan = ", res_v + res_f, out_f = self.RegularLAGtoVLAN(test) print " RegularLAGtoVLAN = ", res_f + self.assertTrue(res_f, "RegularLAGtoVLAN test failed:\n %s\n\ntest:\n%s" % (out_f, pformat(test))) + res_t, out_t = self.RegularVLANtoLAG(test) print " RegularVLANtoLAG = ", res_t - print + self.assertTrue(res_t, "RegularVLANtoLAG test failed:\n %s\n\ntest:\n%s" % (out_t, pformat(test))) + + res_v, out_v = self.Vxlan(test) + print " Vxlan = ", res_v if self.vxlan_enabled: self.assertTrue(res_v, "VxlanTest failed:\n %s\n\ntest:\n%s" % (out_v, pformat(test))) else: self.assertFalse(res_v, "VxlanTest: vxlan works, but it must have been disabled!\n\ntest:%s" % pformat(test)) - self.assertTrue(res_f, "RegularLAGtoVLAN test failed:\n %s\n\ntest:\n%s" % (out_f, pformat(test))) - self.assertTrue(res_t, "RegularVLANtoLAG test failed:\n %s\n\ntest:\n%s" % (out_t, pformat(test))) except AssertionError as e: err = str(e) trace = traceback.format_exc() @@ -248,34 +366,45 @@ def work_test(self): def runTest(self): - print # Warm-up first 
self.warmup() # test itself self.work_test() - def Vxlan(self, test, wu = False): + def Vxlan(self, test): + for i, n in enumerate(test['acc_ports']): + for j, a in enumerate(test['acc_ports']): + res, out = self.checkVxlan(a, n, test) + if not res: + return False, out + " | net_port_rel(acc)=%d acc_port_rel=%d" % (i, j) + for i, n in enumerate(self.net_ports): for j, a in enumerate(test['acc_ports']): res, out = self.checkVxlan(a, n, test) - if not res and not wu: + if not res: return False, out + " | net_port_rel=%d acc_port_rel=%d" % (i, j) return True, "" def RegularLAGtoVLAN(self, test, wu = False): for i, n in enumerate(self.net_ports): for j, a in enumerate(test['acc_ports']): - res, out = self.checkRegularRegularLAGtoVLAN(a, n, test) + res, out = self.checkRegularRegularLAGtoVLAN(a, n, test, wu) + if wu: + #Wait a short time for building FDB and ARP table + time.sleep(0.5) if not res and not wu: return False, out + " | net_port_rel=%d acc_port_rel=%d" % (i, j) + #We only loop all acc_ports in warmup + if wu: + break return True, "" - def RegularVLANtoLAG(self, test, wu = False): + def RegularVLANtoLAG(self, test): for i, (dst, ports) in enumerate(self.pc_info): for j, a in enumerate(test['acc_ports']): res, out = self.checkRegularRegularVLANtoLAG(a, ports, dst, test) - if not res and not wu: + if not res: return False, out + " | pc_info_rel=%d acc_port_rel=%d" % (i, j) return True, "" @@ -301,9 +430,10 @@ def checkRegularRegularVLANtoLAG(self, acc_port, pc_ports, dst_ip, test): exp_packet = Mask(exp_packet) exp_packet.set_do_not_care_scapy(scapy.Ether, "dst") + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, acc_port, packet) - nr_rcvd = testutils.count_matched_packets_all_ports(self, exp_packet, pc_ports, timeout=0.2) + nr_rcvd = count_matched_packets_all_ports_helper(self, exp_packet, self.nr, pc_ports, timeout=20) rv = nr_rcvd == self.nr out = "" if not rv: @@ -312,7 +442,7 @@ def checkRegularRegularVLANtoLAG(self, acc_port, 
pc_ports, dst_ip, test): return rv, out - def checkRegularRegularLAGtoVLAN(self, acc_port, net_port, test): + def checkRegularRegularLAGtoVLAN(self, acc_port, net_port, test, wu): src_mac = self.random_mac dst_mac = self.dut_mac src_ip = test['src_ip'] @@ -333,9 +463,14 @@ def checkRegularRegularLAGtoVLAN(self, acc_port, net_port, test): ip_ttl = 63, ) + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, net_port, packet) - nr_rcvd = testutils.count_matched_packets(self, exp_packet, acc_port, timeout=0.2) + # We don't care if expected packet is received during warming up + if not wu: + nr_rcvd = count_matched_packets_helper(self, exp_packet, self.nr, acc_port, timeout=20) + else: + nr_rcvd = 0 rv = nr_rcvd == self.nr out = "" if not rv: @@ -370,9 +505,11 @@ def checkVxlan(self, acc_port, net_port, test): vxlan_vni=test['vni'], inner_frame=inpacket ) + + self.dataplane.flush() for i in xrange(self.nr): testutils.send_packet(self, net_port, packet) - nr_rcvd = testutils.count_matched_packets(self, inpacket, acc_port, timeout=0.2) + nr_rcvd = count_matched_packets_helper(self, inpacket, self.nr, acc_port, timeout=20) rv = nr_rcvd == self.nr out = "" if not rv: diff --git a/ansible/roles/test/files/ptftests/wr_arp.py b/ansible/roles/test/files/ptftests/wr_arp.py index 5115c3f72f5..531444be56e 100644 --- a/ansible/roles/test/files/ptftests/wr_arp.py +++ b/ansible/roles/test/files/ptftests/wr_arp.py @@ -12,6 +12,7 @@ import datetime import traceback import sys +import socket import threading from collections import defaultdict from pprint import pprint @@ -22,13 +23,14 @@ from ptf import config import ptf.dataplane as dataplane import ptf.testutils as testutils +from device_connection import DeviceConnection class ArpTest(BaseTest): def __init__(self): BaseTest.__init__(self) - log_file_name = '/root/wr_arp_test.log' + log_file_name = '/tmp/wr_arp_test.log' self.log_fp = open(log_file_name, 'a') self.log_fp.write("\nNew test:\n") @@ -46,6 +48,7 
@@ def log(self, message): current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") print "%s : %s" % (current_time, message) self.log_fp.write("%s : %s\n" % (current_time, message)) + self.log_fp.flush() return @@ -59,19 +62,14 @@ def cmd(self, cmds): return stdout, stderr, return_code - def ssh(self, cmds): - ssh_cmds = ["ssh", "-oStrictHostKeyChecking=no", "-oServerAliveInterval=2", "admin@" + self.dut_ssh] - ssh_cmds.extend(cmds) - stdout, stderr, return_code = self.cmd(ssh_cmds) - if stdout != []: - self.log("stdout from dut: '%s'" % str(stdout)) - if stderr != []: - self.log("stderr from dut '%s'" % str(stderr)) - self.log("return code from dut: '%s'" % str(return_code)) + def dut_exec_cmd(self, cmd): + self.log("Executing cmd='{}'".format(cmd)) + stdout, stderr, return_code = self.dut_connection.execCommand(cmd, timeout=30) + self.log("return_code={}, stdout={}, stderr={}".format(return_code, stdout, stderr)) if return_code == 0: return True, str(stdout) - elif return_code == 255 and 'Timeout, server' in stderr and 'not responding' in stderr: + elif return_code == 255: return True, str(stdout) else: return False, "return code: %d. 
stdout = '%s' stderr = '%s'" % (return_code, str(stdout), str(stderr)) @@ -81,14 +79,14 @@ def dut_thr(self, q_from, q_to): cmd = q_from.get() if cmd == 'WR': self.log("Rebooting remote side") - res, res_text = self.ssh(["sudo", "warm-reboot", "-c", self.ferret_ip]) + res, res_text = self.dut_exec_cmd("sudo warm-reboot -c {}".format(self.ferret_ip)) if res: q_to.put('ok: %s' % res_text) else: q_to.put('error: %s' % res_text) elif cmd == 'uptime': self.log("Check uptime remote side") - res, res_text = self.ssh(["uptime", "-s"]) + res, res_text = self.dut_exec_cmd("uptime -s") if res: q_to.put('ok: %s' % res_text) else: @@ -102,11 +100,22 @@ def dut_thr(self, q_from, q_to): self.log("Quiting from dut_thr") return + def test_port_thr(self): + self.log("test_port_thr started") + while time.time() < self.stop_at: + for test in self.tests: + for port in test['acc_ports']: + nr_rcvd = self.testPort(port) + self.records[port][time.time()] = nr_rcvd + self.log("Quiting from test_port_thr") + return + def readMacs(self): addrs = {} for intf in os.listdir('/sys/class/net'): - with open('/sys/class/net/%s/address' % intf) as fp: - addrs[intf] = fp.read().strip() + if os.path.isdir('/sys/class/net/%s' % intf): + with open('/sys/class/net/%s/address' % intf) as fp: + addrs[intf] = fp.read().strip() return addrs @@ -182,6 +191,9 @@ def setUp(self): config = self.get_param('config_file') self.ferret_ip = self.get_param('ferret_ip') self.dut_ssh = self.get_param('dut_ssh') + self.dut_username = self.get_param('dut_username') + self.dut_password = self.get_param('dut_password') + self.dut_connection = DeviceConnection(self.dut_ssh, username=self.dut_username, password=self.dut_password) self.how_long = int(self.get_param('how_long', required=False, default=300)) if not os.path.isfile(config): @@ -241,22 +253,25 @@ def runTest(self): self.req_dut('quit') self.assertTrue(False, "DUT returned error for first uptime request") - records = defaultdict(dict) - stop_at = time.time() + 
self.how_long - rebooted = False - while time.time() < stop_at: - for test in self.tests: - for port in test['acc_ports']: - nr_rcvd = self.testPort(port) - records[port][time.time()] = nr_rcvd - if not rebooted: - result = self.req_dut('WR') - if result.startswith('ok'): - rebooted = True - else: - self.log("Error in WR") - self.req_dut('quit') - self.assertTrue(False, "Error in WR") + self.records = defaultdict(dict) + self.stop_at = time.time() + self.how_long + + test_port_thr = threading.Thread(target=self.test_port_thr) + test_port_thr.setDaemon(True) + test_port_thr.start() + + self.log("Issuing WR command") + result = self.req_dut('WR') + if result.startswith('ok'): + self.log("WR OK!") + else: + self.log("Error in WR") + self.req_dut('quit') + self.assertTrue(False, "Error in WR") + + self.assertTrue(time.time() < self.stop_at, "warm-reboot took to long") + + test_port_thr.join() uptime_after = self.req_dut('uptime') if uptime_after.startswith('error'): @@ -272,7 +287,7 @@ def runTest(self): # check that every port didn't have pauses more than 25 seconds pauses = defaultdict(list) - for port, data in records.items(): + for port, data in self.records.items(): was_active = True last_inactive = None for t in sorted(data.keys()): diff --git a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py index 91967641f00..fbb75e7b184 100644 --- a/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py +++ b/ansible/roles/test/files/tools/loganalyzer/loganalyzer.py @@ -32,7 +32,7 @@ comment_key = '#' system_log_file = '/var/log/syslog' -#-- List of ERROR codes to be returned by LogAnalyzer +#-- List of ERROR codes to be returned by AnsibleLogAnalyzer err_duplicate_start_marker = -1 err_duplicate_end_marker = -2 err_no_end_marker = -3 @@ -40,7 +40,7 @@ err_invalid_string_format = -5 err_invalid_input = -6 -class LogAnalyzer: +class AnsibleLogAnalyzer: ''' @summary: Overview of functionality @@ -52,10 
+52,10 @@ class LogAnalyzer: AND will not match set of 'ignore' regex expressions, will be considered a 'match' and will be reported. - LogAnalyzer will be called initially before any test has ran, and will be + AnsibleLogAnalyzer will be called initially before any test has ran, and will be instructed to place 'start' marker into all log files to be analyzed. - When tests have ran, LogAnalyzer will be instructed to place end-marker - into the log files. After this, LogAnalyzer will be invoked to perform the + When tests have ran, AnsibleLogAnalyzer will be instructed to place end-marker + into the log files. After this, AnsibleLogAnalyzer will be invoked to perform the analysis of logs. The analysis will be performed on specified log files. For each log file only the content between start/end markers will be analyzed. @@ -109,25 +109,41 @@ def create_end_marker(self): return self.end_marker_prefix + "-" + self.run_id #--------------------------------------------------------------------- - def place_marker(self, log_file_list, marker): + def place_marker_to_file(self, log_file, marker): ''' @summary: Place marker into each log file specified. + @param log_file : File path, to be applied with marker. + @param marker: Marker to be placed into log files. + ''' + if not len(log_file) or self.is_filename_stdin(log_file): + self.print_diagnostic_message('Log file {} not found. Skip adding marker.'.format(log_file)) + self.print_diagnostic_message('log file:{}, place marker {}'.format(log_file, marker)) + with open(log_file, 'a') as file: + file.write(marker) + file.write('\n') + file.flush() + + def place_marker_to_syslog(self, marker): + ''' + @summary: Place marker into '/dev/log'. + @param marker: Marker to be placed into syslog. + ''' + + syslogger = self.init_sys_logger() + syslogger.info(marker) + syslogger.info('\n') + + def place_marker(self, log_file_list, marker): + ''' + @summary: Place marker into '/dev/log' and each log file specified. 
@param log_file_list : List of file paths, to be applied with marker. @param marker: Marker to be placed into log files. ''' for log_file in log_file_list: - if not len(log_file) or self.is_filename_stdin(log_file): - continue - self.print_diagnostic_message('log file:%s, place marker %s'%(log_file, marker)) - with open(log_file, 'a') as file: - file.write(marker) - file.write('\n') - file.flush() + self.place_marker_to_file(log_file, marker) - syslogger = self.init_sys_logger() - syslogger.info(marker) - syslogger.info('\n') + self.place_marker_to_syslog(marker) return #--------------------------------------------------------------------- @@ -187,11 +203,15 @@ def create_msg_regex(self, file_lsit): skipinitialspace=True) for index, row in enumerate(csvreader): + row = [item for item in row if item != ""] self.print_diagnostic_message('[diagnostic]:processing row:%d' % index) self.print_diagnostic_message('row:%s'% row) try: - #-- Ignore commented Lines and Empty Lines - if (not row or row[0].startswith(comment_key)): + #-- Ignore Empty Lines + if not row: + continue + #-- Ignore commented Lines + if row[0].startswith(comment_key): self.print_diagnostic_message('[diagnostic]:skipping row[0]:%s' % row[0]) continue @@ -206,13 +226,10 @@ def create_msg_regex(self, file_lsit): 'must be \'s\'(string) or \'r\'(regex)' %(filename,index)) - #-- One error message per line - error_string = row[1] - if (is_regex): - messages_regex.append(error_string) + messages_regex.extend(row[1:]) else: - messages_regex.append(self.error_to_regx(error_string)) + messages_regex.append(self.error_to_regx(row[1:])) except Exception as e: print 'ERROR: line %d is formatted incorrectly in file %s. Skipping line' % (index, filename) @@ -397,10 +414,11 @@ def usage(): print ' init - initialize analysis by placing start-marker' print ' to all log files specified in --logs parameter.' print ' analyze - perform log analysis of files specified in --logs parameter.' 
+ print ' add_end_marker - add end marker to all log files specified in --logs parameter.' print '--out_dir path Directory path where to place output files, ' print ' must be present when --action == analyze' print '--logs path{,path} List of full paths to log files to be analyzed.' - print ' Implicetly system log file will be also processed' + print ' Implicitly system log file will be also processed' print '--run_id string String passed to loganalyzer, uniquely identifying ' print ' analysis session. Used to construct start/end markers. ' print '--match_files_in path{,path} List of paths to files containing strings. A string from log file' @@ -430,6 +448,8 @@ def check_action(action, log_files_in, out_dir, match_files_in, ignore_files_in, if (action == 'init'): ret_code = True + elif (action == 'add_end_marker'): + ret_code = True elif (action == 'analyze'): if out_dir is None or len(out_dir) == 0: print 'ERROR: missing required out_dir for analyze action' @@ -504,12 +524,10 @@ def write_result_file(run_id, out_dir, analysis_result_per_file, messages_regex_ out_file.write('Total matches:%d\n' % match_cnt) # Find unused regex matches for regex in messages_regex_e: - regex_used = False for line in expected_lines_total: if re.search(regex, line): - regex_used = True break - if not regex_used: + else: unused_regex_messages.append(regex) out_file.write('Total expected and found matches:%d\n' % expected_cnt) @@ -519,7 +537,6 @@ def write_result_file(run_id, out_dir, analysis_result_per_file, messages_regex_ out_file.write("\n-------------------------------------------------\n\n") out_file.flush() - #--------------------------------------------------------------------- def write_summary_file(run_id, out_dir, analysis_result_per_file, unused_regex_messages): @@ -615,7 +632,7 @@ def main(argv): usage() sys.exit(err_invalid_input) - analyzer = LogAnalyzer(run_id, verbose, start_marker) + analyzer = AnsibleLogAnalyzer(run_id, verbose, start_marker) log_file_list = 
filter(None, log_files_in.split(tokenizer)) @@ -643,6 +660,9 @@ def main(argv): unused_regex_messages = [] write_result_file(run_id, out_dir, result, messages_regex_e, unused_regex_messages) write_summary_file(run_id, out_dir, result, unused_regex_messages) + elif (action == "add_end_marker"): + analyzer.place_marker(log_file_list, analyzer.create_end_marker()) + return 0 else: print 'Unknown action:%s specified' % action diff --git a/ansible/roles/test/tasks/advanced-reboot.yml b/ansible/roles/test/tasks/advanced-reboot.yml index 69d6258479a..f3fa18742e8 100644 --- a/ansible/roles/test/tasks/advanced-reboot.yml +++ b/ansible/roles/test/tasks/advanced-reboot.yml @@ -4,7 +4,7 @@ - block: - name: figure out vm hosts testbed_vm_info: base_vm={{ vm }} topo={{ testbed_type }} - connection: local + delegate_to: localhost - set_fact: vm_hosts: "{{ neighbor_eosvm_mgmt.values() }}" @@ -38,7 +38,7 @@ when: None not in inboot_list - name: Validate preboot and inboot list - include: roles/test/tasks/advanced_reboot/validate_sad_list.yml + include_tasks: roles/test/tasks/advanced_reboot/validate_sad_list.yml with_items: "{{ preboot_list + inboot_list }}" when: item and ':' in item @@ -148,10 +148,10 @@ allow_vlan_flooding: "{{ allow_vlan_flooding | default('false') | bool }}" sniff_time_incr: "{{ sniff_time_incr | default(60) | int }}" - - include: advanced_reboot/reboot-image-handle.yml + - include_tasks: advanced_reboot/reboot-image-handle.yml when: new_sonic_image is defined - - include: advanced_reboot/upgrade_mlnx_fw.yml + - include_tasks: advanced_reboot/upgrade_mlnx_fw.yml when: - new_sonic_image is defined - reboot_type == "fast-reboot" @@ -169,7 +169,7 @@ set_fact: sad_list="{{ preboot_list }}" when: (None in preboot_list) and (None in inboot_list) - - include: ptf_runner_reboot.yml + - include_tasks: ptf_runner_reboot.yml with_items: "{{ sad_list }}" always: diff --git a/ansible/roles/test/tasks/interface.yml b/ansible/roles/test/tasks/interface.yml index 
a61ef3c1769..b0b51531f75 100644 --- a/ansible/roles/test/tasks/interface.yml +++ b/ansible/roles/test/tasks/interface.yml @@ -8,16 +8,17 @@ - debug: msg="Found link down ports {{ansible_interface_link_down_ports}}, reload SONiC and reenable down ports" - name: reboot - include: common_tasks/reboot_sonic.yml + include_tasks: common_tasks/reboot_sonic.yml - name: figure out fanout switch port in case it was down conn_graph_facts: host={{ inventory_hostname }} - connection: local + delegate_to: localhost - set_fact: neighbors="{{device_conn}}" - - include: resume_fanout_ports.yml - with_items: ansible_interface_link_down_ports + - include_tasks: resume_fanout_ports.yml + with_items: "{{ ansible_interface_link_down_ports }}" + ignore_errors: yes - name: pause and wait interface to be up pause: seconds=30 @@ -32,17 +33,16 @@ - debug: msg="Found link down ports {{ansible_interface_link_down_ports}}" when: ansible_interface_link_down_ports | length > 0 -- block: - - name: Verify interfaces are up correctly - assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } - - rescue: - - include: check_fanout_interfaces.yml - vars: - check_fanout: true +- name: Verify interfaces are up correctly + assert: { that: "{{ ansible_interface_link_down_ports | length }} == 0" } - - fail: msg="Not all interfaces are up" + #rescue: + #- include: check_fanout_interfaces.yml + # vars: + # check_fanout: true + #- fail: msg="Not all interfaces are up" + - block: - name: Verify port channel interfaces are up correctly assert: { that: "'{{ ansible_interface_facts[item]['active'] }}' == 'True'" } diff --git a/ansible/roles/test/tasks/resume_fanout_ports.yml b/ansible/roles/test/tasks/resume_fanout_ports.yml index 9226c5e7eb0..7042da3ce32 100644 --- a/ansible/roles/test/tasks/resume_fanout_ports.yml +++ b/ansible/roles/test/tasks/resume_fanout_ports.yml @@ -20,8 +20,5 @@ intfs_to_exclude: "{{interface}}" - name: bring up neighbor interface {{neighbor_interface}} on {{peer_host}} - 
action: apswitch template=neighbor_interface_no_shut_single.j2 - args: - host: "{{peer_host}}" - login: "{{switch_login[hwsku_map[peer_hwsku]]}}" - connection: switch + shell: config interface startup {{ neighbor_interface }} + become: true diff --git a/ansible/roles/test/tasks/sonic.yml b/ansible/roles/test/tasks/sonic.yml index 47c55dd3ffe..cbfd6927796 100644 --- a/ansible/roles/test/tasks/sonic.yml +++ b/ansible/roles/test/tasks/sonic.yml @@ -30,7 +30,7 @@ - fail: msg="You didn't provide testbed_name=yourtestbedname, so will run by test by tag. Please specify tests you want to run using --tags" when: tags is not defined - - include: test_sonic_by_tag.yml + - include_tasks: test_sonic_by_tag.yml when: - testbed_name is not defined diff --git a/ansible/roles/vm_set/library/kickstart.py b/ansible/roles/vm_set/library/kickstart.py index 8d1dbec4235..2b6c7f7234d 100644 --- a/ansible/roles/vm_set/library/kickstart.py +++ b/ansible/roles/vm_set/library/kickstart.py @@ -1,5 +1,6 @@ #!/usr/bin/python +import datetime from telnetlib import Telnet @@ -163,7 +164,8 @@ def session(new_params): ('aaa root secret 0 %s' % str(new_params['new_root_password']), [r'\(config\)#']), ] - debug = MyDebug('/tmp/debug.%s.txt' % new_params['hostname'], enabled=True) + curtime = datetime.datetime.now().isoformat() + debug = MyDebug('/tmp/debug.%s.%s.txt' % (new_params['hostname'], curtime), enabled=True) ss = SerialSession(new_params['telnet_port'], debug) ss.login(new_params['login'], new_params['password']) ss.enable() @@ -202,15 +204,15 @@ def main(): except ELoginPromptNotFound: result = {'kickstart_code': -1, 'changed': False, 'msg': 'Login prompt not found'} except EWrongDefaultPassword: - result = {'kickstart_code': 0, 'changed': False, 'msg': 'Wrong default password, kickstart of VM has been done'} + result = {'kickstart_code': -2, 'changed': False, 'msg': 'Wrong default password, kickstart of VM has been done'} except EOFError: - result = {'kickstart_code': -2, 'changed': False, 
'msg': 'EOF during the chat'} + result = {'kickstart_code': -3, 'changed': False, 'msg': 'EOF during the chat'} except EMatchNotFound: - result = {'kickstart_code': -3, 'changed': False, 'msg': "Match for output isn't found"} + result = {'kickstart_code': -4, 'changed': False, 'msg': "Match for output isn't found"} except ENotInEnabled: - module.fail_json(msg='Not in enabled mode') + result = {'kickstart_code': -5, 'changed': False, 'msg': "Not in enabled mode"} except Exception, e: - module.fail_json(msg=str(e)) + result = {'kickstart_code': -6, 'changed': False, 'msg': str(e)} module.exit_json(**result) diff --git a/ansible/roles/vm_set/library/sonic_kickstart.py b/ansible/roles/vm_set/library/sonic_kickstart.py index d317e5d778a..cf19fdfe123 100644 --- a/ansible/roles/vm_set/library/sonic_kickstart.py +++ b/ansible/roles/vm_set/library/sonic_kickstart.py @@ -1,5 +1,6 @@ #!/usr/bin/python +import datetime from telnetlib import Telnet @@ -41,7 +42,7 @@ def __init__(self, port, debug): self.d = debug self.d.debug('Starting') self.tn = Telnet('127.0.0.1', port) - self.tn.write('\r\n') + self.tn.write(b"\r\n") return @@ -58,12 +59,12 @@ def cleanup(self): return - def pair(self, action, wait_for, timeout): + def pair(self, action, wait_for, timeout=60): self.d.debug('output: %s' % action) self.d.debug('match: %s' % ",".join(wait_for)) - self.tn.write("%s\n" % action) + self.tn.write(b"%s\n" % action.encode('ascii')) if wait_for is not None: - index, match, text = self.tn.expect(wait_for, timeout) + index, match, text = self.tn.expect([ x.encode('ascii') for x in wait_for ], timeout) self.d.debug('Result of matching: %d %s %s' % (index, str(match), text)) if index == -1: raise EMatchNotFound @@ -79,37 +80,47 @@ def login(self, user, passwords): break for password in passwords: - index = self.pair(user, [r'assword:', r'\$'], 20) + index = self.pair(user, [r'assword:', r'\$']) if index == 0: - index = self.pair(password, [r'login:', r'\$'], 10) + index = 
self.pair(password, [r'login:', r'\$']) if index == 1: break return def configure(self, seq): - self.pair('sudo bash', [r'#'], 10) - for action, wait_for in seq: - self.pair(action, wait_for, 10) - self.pair('exit', [r'\$'], 10) + self.pair('sudo bash', [r'#']) + for cmd in seq: + if len(cmd) == 2: + (action, wait_for) = cmd + self.pair(action, wait_for) + else: + (action, wait_for, timeout) = cmd + self.pair(action, wait_for, timeout) + self.pair('exit', [r'\$']) return def logout(self): - self.pair('exit', [r'login:'], 10) + self.pair('exit', [r'login:']) return def session(new_params): seq = [ + ('while true; do if [ $(systemctl is-active swss) == "active" ]; then break; fi; echo $(systemctl is-active swss); sleep 1; done', [r'#'], 180), + ('pkill dhclient', [r'#']), ('hostname %s' % str(new_params['hostname']), [r'#']), ('sed -i s:sonic:%s: /etc/hosts' % str(new_params['hostname']), [r'#']), ('ifconfig eth0 %s' % str(new_params['mgmt_ip']), [r'#']), + ('ifconfig eth0', [r'#']), ('ip route add 0.0.0.0/0 via %s table default' % str(new_params['mgmt_gw']), [r'#']), + ('ip route', [r'#']), ('echo %s:%s | chpasswd' % (str(new_params['login']), str(new_params['new_password'])), [r'#']), ] - debug = MyDebug('/tmp/debug.%s.txt' % new_params['hostname'], enabled=True) + curtime = datetime.datetime.now().isoformat() + debug = MyDebug('/tmp/debug.%s.%s.txt' % (new_params['hostname'], curtime), enabled=True) ss = SerialSession(new_params['telnet_port'], debug) ss.login(new_params['login'], new_params['passwords']) ss.configure(seq) @@ -143,7 +154,7 @@ def main(): result = {'kickstart_code': -1, 'changed': False, 'msg': 'EOF during the chat'} except EMatchNotFound: result = {'kickstart_code': -1, 'changed': False, 'msg': "Match for output isn't found"} - except Exception, e: + except Exception as e: module.fail_json(msg=str(e)) module.exit_json(**result) diff --git a/ansible/roles/vm_set/library/vlan_port.py b/ansible/roles/vm_set/library/vlan_port.py index 
9c6537c72bc..4509d5a2090 100644 --- a/ansible/roles/vm_set/library/vlan_port.py +++ b/ansible/roles/vm_set/library/vlan_port.py @@ -1,5 +1,6 @@ #!/usr/bin/python +import itertools import re import sys import time @@ -121,18 +122,25 @@ def main(): module = AnsibleModule(argument_spec=dict( cmd=dict(required=True, choices=['create', 'remove', 'list']), - external_port = dict(required=True, type='str'), + external_port=dict(required=True, type='str'), vlan_ids=dict(required=True, type='list'), + is_multi_duts=dict(required=False, type='bool', default=False), )) cmd = module.params['cmd'] external_port = module.params['external_port'] vlan_ids = module.params['vlan_ids'] - vlan_ids.sort() + is_multi_duts = module.params['is_multi_duts'] + + _vlan_ids = vlan_ids + if is_multi_duts: + # flatten the list in the case of multi-DUTs + _vlan_ids = list(itertools.chain.from_iterable(_vlan_ids)) + _vlan_ids.sort() fp_ports = [] - vp = VlanPort(external_port, vlan_ids) + vp = VlanPort(external_port, _vlan_ids) vp.up_external_port() if cmd == "create": @@ -140,10 +148,17 @@ def main(): elif cmd == "remove": vp.remove_vlan_ports() - for vlan_id in vlan_ids: - fp_ports.append("%s.%d" % (external_port, vlan_id)) + fp_port_templ = external_port + ".%s" + if is_multi_duts: + fp_ports = [] + for dut_vlans in vlan_ids: + dut_vlans.sort() + fp_ports.append([fp_port_templ % vid for vid in dut_vlans]) + else: + fp_ports = [fp_port_templ % vid for vid in vlan_ids] module.exit_json(changed=False, ansible_facts={'dut_fp_ports': fp_ports}) + if __name__ == "__main__": main() diff --git a/ansible/roles/vm_set/tasks/add_topo.yml b/ansible/roles/vm_set/tasks/add_topo.yml index 88274ec4bb1..4904de3f8ab 100644 --- a/ansible/roles/vm_set/tasks/add_topo.yml +++ b/ansible/roles/vm_set/tasks/add_topo.yml @@ -18,8 +18,8 @@ username: "{{ docker_registry_username }}" password: "{{ docker_registry_password }}" name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}:{{ 
ptf_imagetag }}" - pull: always + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" + pull: missing state: reloaded net: none detach: True diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 5fab767b05d..a3750b5b08c 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -190,10 +190,10 @@ include: disconnect_vms.yml when: action == 'disconnect_vms' -- name: Start SONiC VM - include: start_sonic_vm.yml - when: action == 'start_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' +# - name: Start SONiC VM +# include: start_sonic_vm.yml +# when: action == 'start_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' -- name: Stop SONiC VM - include: stop_sonic_vm.yml - when: action == 'stop_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' +# - name: Stop SONiC VM +# include: stop_sonic_vm.yml +# when: action == 'stop_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' diff --git a/ansible/roles/vm_set/tasks/start_sonic_vm.yml b/ansible/roles/vm_set/tasks/start_sonic_vm.yml index 35ad1703231..ebc70a10e89 100644 --- a/ansible/roles/vm_set/tasks/start_sonic_vm.yml +++ b/ansible/roles/vm_set/tasks/start_sonic_vm.yml @@ -1,9 +1,15 @@ +- name: Print mess + debug: msg="{{ hostvars }}" + - name: Create directory for vm images and vm disks file: path={{ item }} state=directory mode=0755 with_items: - "sonic-vm/images" - "sonic-vm/disks" +- name: Print mess + debug: msg="{{ hostvars }}" + - set_fact: src_disk_image: "{{ home_path }}/sonic-vm/images/sonic-vs.img" disk_image: "{{ home_path }}/sonic-vm/disks/sonic_{{ dut_name }}.img" diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index 29c95ac7507..d7ac633a7ea 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -13,16 +13,21 @@ function usage echo " $0 [options] 
(connect-vms | disconnect-vms) " echo " $0 [options] config-vm " echo " $0 [options] (gen-mg | deploy-mg | test-mg) " - echo + echo " $0 [options] (create-master | destroy-master) " + echo echo "Options:" - echo " -t : testbed CSV file name (default: 'testbed.csv')" - echo " -m : virtual machine file name (default: 'veos')" + echo " -t : testbed CSV file name (default: 'testbed.csv')" + echo " -m : virtual machine file name (default: 'veos')" + echo " -k : vm type (veos|ceos) (default: 'veos')" + echo " -n : vm num (default: 0)" + echo " -s : master set identifier on specified (default: 1)" echo echo "Positional Arguments:" echo " : Hostname of server on which to start VMs" echo " : Path to file containing Ansible Vault password" echo " : Name of the target topology" echo " : Name of the Ansible inventory containing the DUT" + echo " : Server identifier in form k8s_server_{id}, corresponds to k8s-ubuntu inventory group name" echo echo "To start all VMs on a server: $0 start-vms 'server-name' ~/.password" echo "To restart a subset of VMs:" @@ -50,6 +55,8 @@ function usage echo " -e enable_data_plane_acl=true" echo " -e enable_data_plane_acl=false" echo " by default, data acl is enabled" + echo "To create Kubernetes master on a server: $0 -m k8s-ubuntu create-master 'k8s-server-name' ~/.password" + echo "To destroy Kubernetes master on a server: $0 -m k8s-ubuntu destroy-master 'k8s-server-name' ~/.password" echo echo "You should define your topology in testbed CSV file" echo @@ -58,36 +65,39 @@ function usage function read_file { - echo reading + echo reading - # Filter testbed names in the first column in the testbed definition file - line=$(cat $tbfile | grep "^$1,") + # Filter testbed names in the first column in the testbed definition file + line=$(cat $tbfile | grep "^$1,") - if [ $? -ne 0 ] - then + if [ $? 
-ne 0 ] + then echo "Couldn't find topology name '$1'" exit - fi + fi - NL=' + NL=' ' - case $line in - *"$NL"*) echo "Find more than one topology names in $tbfile" - exit - ;; - *) echo Found topology $1 - ;; - esac - - IFS=, read -r -a line_arr <<< $line - - testbed_name=${line_arr[1]} - topo=${line_arr[2]} - ptf_imagename=${line_arr[3]} - ptf_ip=${line_arr[4]} - server=${line_arr[5]} - vm_base=${line_arr[6]} - dut=${line_arr[7]} + case $line in + *"$NL"*) echo "Find more than one topology names in $tbfile" + exit + ;; + *) echo Found topology $1 + ;; + esac + + IFS=, read -r -a line_arr <<< $line + + testbed_name=${line_arr[1]} + topo=${line_arr[2]} + ptf_imagename=${line_arr[3]} + ptf=${line_arr[4]} + ptf_ip=${line_arr[5]} + ptf_ipv6=${line_arr[6]} + server=${line_arr[7]} + vm_base=${line_arr[8]} + dut=${line_arr[9]//;/,} + duts=${dut//[\[\] ]/} } function start_vms @@ -98,7 +108,8 @@ function start_vms shift echo "Starting VMs on server '${server}'" - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_start_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" testbed_start_VMs.yml \ + --vault-password-file="${passwd}" -l "${server}" $@ } function stop_vms @@ -148,9 +159,9 @@ function add_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" -e ptf_ipv6="$ptf_ipv6" $@ - ansible-playbook 
fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ # Delete the obsoleted arp entry for the PTF IP ip neighbor flush $ptf_ip @@ -168,7 +179,30 @@ function remove_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" -e ptf_ipv6="$ptf_ipv6" $@ + + echo Done +} + +function connect_topo +{ + topology=$1 + passwd=$2 + shift + shift + + echo "Connect to Topology '${topology}'" + + read_file ${topology} + + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_topo.yml \ + --vault-password-file="${passwd}" --limit "$server" \ + -e topo_name="$topo_name" -e dut_name="$duts" \ + -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" \ + -e topo="$topo" -e vm_set_name="$testbed_name" \ + -e ptf_imagename="$ptf_imagename" -e vm_type="$vm_type" -e ptf_ipv6="$ptf_ipv6" $@ + + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ echo Done } @@ -183,9 +217,9 @@ function renumber_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e 
ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6"$@ - ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$duts" $@ echo Done } @@ -196,11 +230,11 @@ function refresh_dut passwd=$2 shift shift - echo "Refresh $dut in '${topology}'" + echo "Refresh $duts in '${topology}'" read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_refresh_dut.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_refresh_dut.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" -e ptf_ipv6="$ptf_ipv6" $@ echo Done } @@ -211,7 +245,7 @@ function connect_vms read_file $1 - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } @@ -222,12 +256,11 @@ function disconnect_vms read_file $1 - 
ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$duts" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } - function generate_minigraph { topology=$1 @@ -241,7 +274,7 @@ function generate_minigraph read_file $topology - ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e local_minigraph=true $@ + ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e local_minigraph=true $@ echo Done } @@ -259,7 +292,7 @@ function deploy_minigraph read_file $topology - ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e deploy=true -e save=true $@ + ansible-playbook -i "$inventory" config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e deploy=true -e save=true $@ echo Done } @@ -277,7 +310,7 @@ function test_minigraph read_file $topology - ansible-playbook -i "$inventory" --diff --connection=local --check config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$dut" -e testbed_name="$topology" -e testbed_file=$tbfile -e local_minigraph=true $@ + ansible-playbook -i "$inventory" --diff --connection=local --check config_sonic_basedon_testbed.yml --vault-password-file="$passfile" -l "$duts" -e testbed_name="$topology" -e testbed_file=$tbfile -e vm_file=$vmfile -e 
local_minigraph=true $@ echo Done } @@ -293,19 +326,50 @@ function config_vm echo Done } -function connect_topo +function start_k8s_vms { - echo "Connect to Fanout" + server=$1 + servernumber="${server#*"k8s_server_"}" + passwd=$2 + shift + shift - read_file $1 + echo "Starting Kubernetes VMs on server '${server}'" + + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_start_k8s_VMs.yml --vault-password-file="${passwd}" -e k8s="true" -l "${server}" $@ +} + +function setup_k8s_vms +{ + server=$1 + servernumber="${server#*"k8s_server_"}" + passwd=$2 + + echo "Setting up Kubernetes VMs on server '${server}'" + + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_setup_k8s_master.yml -e servernumber="${servernumber}" -e k8s="true" -e msetnumber="${msetnumber}" +} - ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="$2" -e "dut=$dut" +function stop_k8s_vms +{ + server=$1 + servernumber="${server#*"k8s_server_"}" + passwd=$2 + shift + shift + + echo "Stopping Kubernetes VMs on server '${server}'" + + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_stop_k8s_VMs.yml --vault-password-file="${passwd}" -l "${server}" -e k8s="true" $@ } vmfile=veos tbfile=testbed.csv +vm_type=veos +vm_num=0 +msetnumber=1 -while getopts "t:m:" OPTION; do +while getopts "t:m:k:n:s:" OPTION; do case $OPTION in t) tbfile=$OPTARG @@ -313,6 +377,15 @@ while getopts "t:m:" OPTION; do m) vmfile=$OPTARG ;; + k) + vm_type=$OPTARG + ;; + n) + vm_num=$OPTARG + ;; + s) + msetnumber=$OPTARG + ;; *) usage esac @@ -358,6 +431,11 @@ case "${subcmd}" in ;; test-mg) test_minigraph $@ ;; + create-master) start_k8s_vms $@ + setup_k8s_vms $@ + ;; + destroy-master) stop_k8s_vms $@ + ;; *) usage ;; esac diff --git a/ansible/testbed.csv b/ansible/testbed.csv index ac18886e0b2..1cc20230b36 100644 --- a/ansible/testbed.csv +++ b/ansible/testbed.csv @@ -1,15 +1,16 @@ -# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment 
-cel_e1031_t0,ptf1,t0-e1031,docker-ptf,10.250.0.110/24,server_1,VM0100,cel-e1031-01,Tests ptf -cel_e1031_t1,ptf1,t1-e1031,docker-ptf,10.250.0.110/24,server_1,VM0200,cel-e1031-01,Tests ptf -cel_slx_t0,ptf2,t0-slx,docker-ptf,10.251.0.110/24,server_2,VM0300,cel-seastone-01,Tests ptf -cel_slx_t1,ptf2,t1-slx,docker-ptf,10.251.0.110/24,server_3,VM0400,cel-seastone-01,Tests ptf -ptf1-m,ptf1,ptf32,docker-ptf-sai-mlnx,10.255.0.188/24,server_1,,str-msn2700-01,Test ptf Mellanox -ptf2-b,ptf2,ptf64,docker-ptf-sai-brcm,10.255.0.189/24,server_1,,lab-s6100-01,Test ptf Broadcom -vms-sn2700-t1,vms1-1,t1,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms -vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms -vms-sn2700-t0,vms1-1,t0,docker-ptf-sai-mlnx,10.255.0.178/24,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms -vms-s6000-t0,vms2-1,t0,docker-ptf-sai-brcm,10.255.0.179/24,server_1,VM0100,lab-s6000-01,Tests Dell S6000 vms -vms-a7260-t0,vms3-1,t0-116,docker-ptf-sai-brcm,10.255.0.180/24,server_1,VM0100,lab-a7260-01,Tests Arista A7260 vms -vms-s6100-t0,vms4-1,t0-64,docker-ptf-sai-brcm,10.255.0.181/24,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms -vms-s6100-t1,vms4-1,t1-64,docker-ptf-sai-brcm,10.255.0.182/24,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms -vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf-sai-brcm,10.255.0.183/24,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms +# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment +cel_slx_t0,ptf2,t0-slx,docker-ptf,ptf2,10.251.0.110/24,,server_2,VM0300,cel-seastone-01,Tests ptf +cel_slx_t1,ptf2,t1-slx,docker-ptf,ptf2,10.251.0.110/24,,server_3,VM0400,cel-seastone-01,Tests ptf +cel_e1031_t0,ptf1,t0-e1031,docker-ptf,ptf-unknown,10.250.0.110/24,,server_1,VM0100,cel-e1031-01,Tests ptf +ptf1-m,ptf1,ptf32,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,Test 
ptf Mellanox +ptf2-b,ptf2,ptf64,docker-ptf-sai-brcm,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,Test ptf Broadcom +vms-sn2700-t1,vms1-1,t1,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms +vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms +vms-sn2700-t0,vms1-1,t0,docker-ptf-sai-mlnx,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms +vms-s6000-t0,vms2-1,t0,docker-ptf-sai-brcm,ptf-unknown,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,Tests Dell S6000 vms +vms-a7260-t0,vms3-1,t0-116,docker-ptf-sai-brcm,ptf-unknown,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,Tests Arista A7260 vms +vms-s6100-t0,vms4-1,t0-64,docker-ptf-sai-brcm,ptf-unknown,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms +vms-s6100-t1,vms4-1,t1-64,docker-ptf-sai-brcm,ptf-unknown,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms +vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf-sai-brcm,ptf-unknown,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms +vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf-unknown,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],Example Multi DUTs testbed +vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,superman diff --git a/ansible/vars/docker_registry.yml b/ansible/vars/docker_registry.yml index bb881811f63..2e00c6f4c32 100644 --- a/ansible/vars/docker_registry.yml +++ b/ansible/vars/docker_registry.yml @@ -1,4 +1,4 @@ docker_registry_host: localhost:5000 docker_registry_username: clsnet -docker_registry_password: +docker_registry_password: sonic diff --git a/tests/conftest.py b/tests/conftest.py old mode 100644 new mode 100755 index 71316804185..1e8b452c09d --- a/tests/conftest.py +++ b/tests/conftest.py @@ -461,4 +461,4 @@ def disable_container_autorestart(duthost, 
request): for name, state in container_autorestart_states.items(): if state == "enabled": cmds_enable.append(cmd_enable.format(name)) - duthost.shell_cmds(cmds=cmds_enable) \ No newline at end of file + duthost.shell_cmds(cmds=cmds_enable) diff --git a/tests/run_tests.sh b/tests/run_tests.sh index 8907bf5c399..b26554746cb 100755 --- a/tests/run_tests.sh +++ b/tests/run_tests.sh @@ -141,7 +141,9 @@ function setup_test_options() PRET_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/pretest.xml --log-file=${LOG_PATH}/pretest.log" POST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/posttest.xml --log-file=${LOG_PATH}/posttest.log" - TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log" + # TEST_LOGGING_OPTIONS="--junit-xml=${LOG_PATH}/tr.xml --log-file=${LOG_PATH}/test.log" + TEST_LOGGING_OPTIONS="--html=${LOG_PATH}/result.html --log-file=${LOG_PATH}/test.log" + fi UTIL_TOPOLOGY_OPTIONS="--topology util" if [[ -z ${TOPOLOGY} ]]; then @@ -194,7 +196,7 @@ function run_debug_tests() function prepare_dut() { echo "=== Preparing DUT for subsequent tests ===" - pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m pretest + pytest ${PYTEST_UTIL_OPTS} ${PRET_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m pretest -v # Give some delay for the newly announced routes to propagate. 
sleep 120 @@ -203,13 +205,13 @@ function prepare_dut() function cleanup_dut() { echo "=== Cleaning up DUT after tests ===" - pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m posttest + pytest ${PYTEST_UTIL_OPTS} ${POST_LOGGING_OPTIONS} ${UTIL_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -m posttest -v } function run_group_tests() { echo "=== Running tests in groups ===" - pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} + pytest ${TEST_CASES} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -v } function run_individual_tests() @@ -225,10 +227,11 @@ function run_individual_tests() if [[ ${test_dir} != "." ]]; then mkdir -p ${LOG_PATH}/${test_dir} fi - TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml" + # TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml" + TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --html=${LOG_PATH}/result.html" fi - pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} + pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} -v ret_code=$? # If test passed, no need to keep its log. @@ -330,4 +333,5 @@ if [[ x"${TEST_METHOD}" != x"debug" && x"${BYPASS_UTIL}" == x"False" ]]; then cleanup_dut fi +# run_debug_tests exit ${RC}