diff --git a/ansible/README.md b/ansible/README.md index 0e0ad846dab..68d7bc5b680 100644 --- a/ansible/README.md +++ b/ansible/README.md @@ -64,7 +64,6 @@ roles/ sonic_test/ # same kind of structure as above, but for the integration test role, # see http://github.com/Azure/sonic-integrationtest - sonic_vm/ # for a future, vm based deployment of sonic sonic_s6000/ # place Dell s6000 specific tasks here sonic_msn2700/ # place Mellanox msn2700 specific tasks here ``` diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml index b6d3d4a4621..8342051869a 100644 --- a/ansible/config_sonic_basedon_testbed.yml +++ b/ansible/config_sonic_basedon_testbed.yml @@ -36,7 +36,7 @@ - block: - name: Gathering testbed information - test_facts: testbed_name="{{ testbed_name }}" + test_facts: testbed_name="{{ testbed_name }}" testbed_file="{{ testbed_file }}" connection: local - fail: msg="The DUT you are trying to run test does not belongs to this testbed" @@ -92,11 +92,6 @@ intf_names: "{{ intf_names | default({}) | combine({item.key: port_alias[item.value[0]|int:item.value[-1]|int+1] }) }}" with_dict: interface_to_vms - - name: create minigraph file in ansible minigraph folder - template: src=templates/minigraph_template.j2 - dest=minigraph/{{ inventory_hostname}}.{{ topo }}.xml - connection: local - - block: - name: saved original minigraph file in SONiC DUT(ignore errors when file doesnot exist) shell: mv /etc/sonic/minigraph.xml /etc/sonic/minigraph.xml.orig diff --git a/ansible/doc/README.new.testbed.Configuration.md b/ansible/doc/README.new.testbed.Configuration.md index a5c64168dc5..2f7bf5722c1 100644 --- a/ansible/doc/README.new.testbed.Configuration.md +++ b/ansible/doc/README.new.testbed.Configuration.md @@ -74,7 +74,7 @@ For each host device that you add; define or verify the: - mgmt_bridge - mgmt_prefixlen (this should match with the mgmt_subnet_mask_length) - mgmt_gw -- external_iface +- external_port ### 
veos_groups section: **USAGE**: veos diff --git a/ansible/doc/README.testbed.Setup.md b/ansible/doc/README.testbed.Setup.md index 6efa604e43e..e4924d826ec 100644 --- a/ansible/doc/README.testbed.Setup.md +++ b/ansible/doc/README.testbed.Setup.md @@ -81,7 +81,7 @@ Once you are in the docker, you need to modify the testbed configuration files t - Update server management IP in [```ansible/veos```](../veos). - Update testbed server credentials in [```ansible/group_vars/vm_host/creds.yml```](../group_vars/vm_host/creds.yml). - Update server network configuration for VM and PTF management interface in [```ansible/host_vars/STR-ACS-SERV-01.yml```](../host_vars/STR-ACS-SERV-01.yml). - - ```external_iface```: server trunk port name (connected to the fanout switch) + - ```external_port```: server trunk port name (connected to the fanout switch) - ```mgmt_gw```: ip of gateway for VM mgmt interfaces - ```mgmt_prefixlen```: prefixlen for management interfaces - Check that ansible could reach this device by command ```ansible -m ping -i veos vm_host_1```. diff --git a/ansible/doc/README.testbed.VsSetup.md b/ansible/doc/README.testbed.VsSetup.md new file mode 100644 index 00000000000..627869e84b4 --- /dev/null +++ b/ansible/doc/README.testbed.VsSetup.md @@ -0,0 +1,123 @@ +# Testbed Setup + +This document describes the steps to setup the virtual switch based testbed and deploy a topology. + +## Prepare testbed server + +- Install Ubuntu 18.04 amd64 server. To setup a T0 topology, the server needs to have 10GB free memory. +- Setup internal management network. + +``` +brctl addbr br1 +ifconfig br1 10.250.0.1/24 +ifconfig br1 up +``` + +- Download vEOS image from [arista](https://www.arista.com/en/support/software-download). +- Copy below image files to ```~/veos-vm/images``` on your testbed server. + - ```Aboot-veos-serial-8.0.0.iso``` + - ```vEOS-lab-4.15.9M.vmdk``` + +## Setup docker registry for *PTF* docker + +PTF docker is used to send and receive packets to test data plane. 
+ +- Build PTF docker +``` +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-ptf.gz +``` + +- Setup [docker registry](https://docs.docker.com/registry/) and upload *docker-ptf* to the docker registry. + +## Build or download *sonic-mgmt* docker image + +ansible playbook in *sonic-mgmt* repo requires to setup ansible and various dependencies. +We have built a *sonic-mgmt* docker that installs all dependencies, and you can build +the docker and run ansible playbook inside the docker. + +- Build *sonic-mgmt* docker +``` +git clone --recursive https://github.com/Azure/sonic-buildimage.git +make configure PLATFORM=generic +make target/docker-sonic-mgmt.gz +``` + +Pre-built *sonic-mgmt* can also be downloaded from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/bldenv/job/docker-sonic-mgmt/lastSuccessfulBuild/artifact/target/docker-sonic-mgmt.gz). + +## Download sonic-vs image + +- Download sonic-vs image from [here](https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-image/lastSuccessfulBuild/artifact/target/sonic-vs.img.gz) +- unzip the image and move it into ```~/sonic-vm/images/``` + +## Clone sonic-mgmt repo + +``` +git clone https://github.com/Azure/sonic-mgmt +``` + +- Modify veos.vtb to use the user name to login linux host. Add public key authorized\_keys for your user. +Put the private key inside the sonic-mgmt docker container. Make sure you can login into box using +```ssh yourusername@172.17.0.1``` without any password prompt inside the docker container. 
+ +``` +lgh@gulv-vm2:/data/sonic/sonic-mgmt/ansible$ git diff +diff --git a/ansible/veos.vtb b/ansible/veos.vtb +index 4ea5a7a..4cfc448 100644 +--- a/ansible/veos.vtb ++++ b/ansible/veos.vtb +@@ -1,5 +1,5 @@ +[vm_host_1] +-STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=use_own_value ++STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=lgh + + [vm_host:children] +vm_host_1 +``` + +## Run sonic-mgmt docker + +``` +docker run -v $PWD:/data -it docker-sonic-mgmt bash +``` + +From now on, all steps are running inside the *sonic-mgmt* docker. + +## Setup Arista VMs in the server + +``` +./testbed-cli.sh -m veos.vtb start-vms server_1 password.txt +``` + - please note: Here "password.txt" is the ansible vault password file name/path. Ansible allows user use ansible vault to encrypt password files. By default, this shell script require a password file. If you are not using ansible vault, just create an empty file and pass the filename to the command line. The file name and location is created and maintained by user. + +Check that all VMs are up and running: ```ansible -m ping -i veos server_1``` + +## Deploy T0 topology + +``` +./testbed-cli.sh -t vtestbed.csv -m veos.vtb add-topo vms-kvm-t0 password.txt +``` + +## Deploy minigraph on the DUT + +``` +./testbed-cli.sh -t vtestbed.csv -m veos.vtb deploy-mg vms-kvm-t0 lab password.txt +``` + +You should be login into the sonic kvm using IP: 10.250.0.101 using admin:password. +You should see BGP sessions up in sonic. 
+ +``` +admin@vlab-01:~$ show ip bgp sum +BGP router identifier 10.1.0.32, local AS number 65100 +RIB entries 12807, using 1401 KiB of memory +Peers 8, using 36 KiB of memory +Peer groups 2, using 112 bytes of memory + +Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd +10.0.0.57 4 64600 3208 12 0 0 0 00:00:22 6400 +10.0.0.59 4 64600 3208 593 0 0 0 00:00:22 6400 +10.0.0.61 4 64600 3205 950 0 0 0 00:00:21 6400 +10.0.0.63 4 64600 3204 950 0 0 0 00:00:21 6400 +``` diff --git a/ansible/fanout_connect.yml b/ansible/fanout_connect.yml index aae95ed82d4..d6020d6852c 100644 --- a/ansible/fanout_connect.yml +++ b/ansible/fanout_connect.yml @@ -7,21 +7,23 @@ when: - dut is not defined - - set_fact: - server: "{{ inventory_hostname|lower }}" - server_port: "{{ external_iface }}" + - block: + - set_fact: + server: "{{ inventory_hostname|lower }}" + server_port: "{{ external_port }}" - - debug: msg="Connect {{ server }}:{{ server_port }} to {{ dut }}" + - debug: msg="Connect {{ server }}:{{ server_port }} to {{ dut }}" - - name: get the username running the deploy - command: whoami - connection: local - become: no - register: calling_username - changed_when: false + - name: get the username running the deploy + command: whoami + connection: local + become: no + register: calling_username + changed_when: false - - set_fact: userid={{ calling_username.stdout }} + - set_fact: userid={{ calling_username.stdout }} - - set_fact: connect_leaf=false + - set_fact: connect_leaf=false - - include: roles/fanout/tasks/rootfanout_connect.yml + - include: roles/fanout/tasks/rootfanout_connect.yml + when: external_port is defined diff --git a/ansible/group_vars/all/env.yml b/ansible/group_vars/all/env.yml new file mode 100644 index 00000000000..434335885d3 --- /dev/null +++ b/ansible/group_vars/all/env.yml @@ -0,0 +1,5 @@ +# if your lab needs http(s) proxy to access Internet +# Uncomment following section and provide correct http(s) proxy +#proxy_env: +# http_proxy: 
http://10.252.0.99:3128 +# https_proxy: http://10.252.0.99:3128 diff --git a/ansible/group_vars/vm_host/main.yml b/ansible/group_vars/vm_host/main.yml index a34354943d4..9f3bcceea73 100644 --- a/ansible/group_vars/vm_host/main.yml +++ b/ansible/group_vars/vm_host/main.yml @@ -1,4 +1,4 @@ -root_path: /home/azure/veos-vm +root_path: veos-vm vm_images_url: https://acsbe.blob.core.windows.net/vmimages cd_image_filename: Aboot-veos-serial-8.0.0.iso hdd_image_filename: vEOS-lab-4.15.9M.vmdk @@ -7,9 +7,3 @@ skip_image_downloading: false vm_console_base: 7000 memory: 2097152 max_fp_num: 4 - -# proxy -proxy_env: - http_proxy: http://10.252.0.99:3128 - https_proxy: http://10.252.0.99:3128 - diff --git a/ansible/host_vars/STR-ACS-SERV-01.yml b/ansible/host_vars/STR-ACS-SERV-01.yml index a2b44780eed..20e5b88b6b9 100644 --- a/ansible/host_vars/STR-ACS-SERV-01.yml +++ b/ansible/host_vars/STR-ACS-SERV-01.yml @@ -2,5 +2,5 @@ mgmt_bridge: br1 mgmt_prefixlen: 23 mgmt_gw: 10.255.0.1 vm_mgmt_gw: 10.254.0.1 -external_iface: p4p1 +external_port: p4p1 diff --git a/ansible/host_vars/STR-ACS-SERV-02.yml b/ansible/host_vars/STR-ACS-SERV-02.yml index 9c87d6ee0b6..43d0f89e1d4 100644 --- a/ansible/host_vars/STR-ACS-SERV-02.yml +++ b/ansible/host_vars/STR-ACS-SERV-02.yml @@ -1,5 +1,4 @@ mgmt_bridge: br1 mgmt_prefixlen: 23 mgmt_gw: 10.255.0.1 -external_iface: p4p1 - +external_port: p4p1 diff --git a/ansible/host_vars/STR-ACS-VSERV-01.yml b/ansible/host_vars/STR-ACS-VSERV-01.yml new file mode 100644 index 00000000000..c6e90fe3579 --- /dev/null +++ b/ansible/host_vars/STR-ACS-VSERV-01.yml @@ -0,0 +1,6 @@ +mgmt_bridge: br1 +mgmt_prefixlen: 24 +mgmt_gw: 10.250.0.1 +vm_mgmt_gw: 10.250.0.1 + +internal_mgmt_port: True diff --git a/ansible/lab b/ansible/lab index f2f8d81c484..26a84304da6 100644 --- a/ansible/lab +++ b/ansible/lab @@ -7,6 +7,7 @@ iface_speed='40000' [sonic_s6000] lab-s6000-01 ansible_host=10.251.0.189 +vlab-01 ansible_host=10.250.0.101 [sonic_s6000:vars] hwsku="Force10-S6000" diff --git 
a/ansible/roles/vm_set/library/kvm_port.py b/ansible/roles/vm_set/library/kvm_port.py new file mode 100644 index 00000000000..70b9c5b9ab2 --- /dev/null +++ b/ansible/roles/vm_set/library/kvm_port.py @@ -0,0 +1,57 @@ +#!/usr/bin/python + +import re +import sys +import time +import subprocess +from ansible.module_utils.basic import * + +DOCUMENTATION = ''' +module: kvm_port +version_added: "0.1" +author: Guohan Lu (gulv@microsoft.com) +short_description: Gather management and front panel ports from KVM-based DUT +''' + +EXAMPLES = ''' +- name: Get front panel and mgmt port for kvm vm + kvm_port: + vmname: "{{ dut_name }}" +''' + +def main(): + + module = AnsibleModule(argument_spec=dict( + vmname = dict(required=True), + )) + + vmname = module.params['vmname'] + + try: + output = subprocess.check_output( + "virsh domiflist %s" % vmname, + env={"LIBVIRT_DEFAULT_URI": "qemu:///system"}, + shell=True) + except subprocess.CalledProcessError: + module.fail_json(msg="failed to iflist dom %s" % vmname) + + mgmt_port = None + fp_ports = [] + + for l in output.split('\n'): + fds = re.split('\s+', l) + if len(fds) != 5: + continue + if fds[1] == "ethernet": + if mgmt_port == None: + mgmt_port = fds[0] + else: + fp_ports.append(fds[0]) + + if mgmt_port == None: + module.fail_json(msg="failed to find mgmt port") + + module.exit_json(changed=False, ansible_facts={'dut_mgmt_port': mgmt_port, 'dut_fp_ports': fp_ports}) + +if __name__ == "__main__": + main() diff --git a/ansible/roles/vm_set/library/sonic_kickstart.py b/ansible/roles/vm_set/library/sonic_kickstart.py new file mode 100644 index 00000000000..d317e5d778a --- /dev/null +++ b/ansible/roles/vm_set/library/sonic_kickstart.py @@ -0,0 +1,155 @@ +#!/usr/bin/python + +from telnetlib import Telnet + + +class MyDebug(object): + def __init__(self, filename, enabled=True): + if enabled: + self.fp = open(filename, 'w') + else: + self.fp = None + + return + + def cleanup(self): + if self.fp: + self.fp.close() + self.fp = None + + 
return + + def __del__(self): + self.cleanup() + + return + + def debug(self, msg): + if self.fp: + self.fp.write('%s\n' % msg) + self.fp.flush() + + return + + +class EMatchNotFound(Exception): + pass + + +class SerialSession(object): + def __init__(self, port, debug): + self.d = debug + self.d.debug('Starting') + self.tn = Telnet('127.0.0.1', port) + self.tn.write('\r\n') + + return + + def __del__(self): + self.cleanup() + + return + + def cleanup(self): + if self.tn: + self.tn.close() + self.tn = None + self.d.cleanup() + + return + + def pair(self, action, wait_for, timeout): + self.d.debug('output: %s' % action) + self.d.debug('match: %s' % ",".join(wait_for)) + self.tn.write("%s\n" % action) + if wait_for is not None: + index, match, text = self.tn.expect(wait_for, timeout) + self.d.debug('Result of matching: %d %s %s' % (index, str(match), text)) + if index == -1: + raise EMatchNotFound + else: + index = 0 + + return index + + def login(self, user, passwords): + while True: + index = self.pair('\r', [r'login:', r'assword:'], 300) + if index == 0: + break + + for password in passwords: + index = self.pair(user, [r'assword:', r'\$'], 20) + if index == 0: + index = self.pair(password, [r'login:', r'\$'], 10) + if index == 1: + break + + return + + def configure(self, seq): + self.pair('sudo bash', [r'#'], 10) + for action, wait_for in seq: + self.pair(action, wait_for, 10) + self.pair('exit', [r'\$'], 10) + + return + + def logout(self): + self.pair('exit', [r'login:'], 10) + + return + +def session(new_params): + seq = [ + ('hostname %s' % str(new_params['hostname']), [r'#']), + ('sed -i s:sonic:%s: /etc/hosts' % str(new_params['hostname']), [r'#']), + ('ifconfig eth0 %s' % str(new_params['mgmt_ip']), [r'#']), + ('ip route add 0.0.0.0/0 via %s table default' % str(new_params['mgmt_gw']), [r'#']), + ('echo %s:%s | chpasswd' % (str(new_params['login']), str(new_params['new_password'])), [r'#']), + ] + + debug = MyDebug('/tmp/debug.%s.txt' % 
new_params['hostname'], enabled=True) + ss = SerialSession(new_params['telnet_port'], debug) + ss.login(new_params['login'], new_params['passwords']) + ss.configure(seq) + ss.logout() + ss.cleanup() + + return + + +def core(module): + session(module.params) + + return {'kickstart_code': 0, 'changed': True, 'msg': 'Kickstart completed'} + + +def main(): + + module = AnsibleModule(argument_spec=dict( + telnet_port = dict(required=True), + login = dict(required=True), + passwords = dict(required=True, type='list'), + hostname = dict(required=True), + mgmt_ip = dict(required=True), + mgmt_gw = dict(required=True), + new_password = dict(required=True), + )) + + try: + result = core(module) + except EOFError: + result = {'kickstart_code': -1, 'changed': False, 'msg': 'EOF during the chat'} + except EMatchNotFound: + result = {'kickstart_code': -1, 'changed': False, 'msg': "Match for output isn't found"} + except Exception, e: + module.fail_json(msg=str(e)) + + module.exit_json(**result) + + return + + +from ansible.module_utils.basic import * +main() diff --git a/ansible/roles/vm_set/library/vlan_port.py b/ansible/roles/vm_set/library/vlan_port.py new file mode 100644 index 00000000000..9c6537c72bc --- /dev/null +++ b/ansible/roles/vm_set/library/vlan_port.py @@ -0,0 +1,149 @@ +#!/usr/bin/python + +import re +import sys +import time +import subprocess +from pprint import pprint +from ansible.module_utils.basic import * + +DOCUMENTATION = ''' +module: vlan_port +version_added: "0.1" +author: Guohan Lu (gulv@microsoft.com) +short_description: Get/Create/Remove vlan tunnel port in the test server for physical DUT +''' + +EXAMPLES = ''' +- name: Set front panel port for vlan tunnel + vlan_port: + external_port: "{{ external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "list" +''' + +DOCUMENTATION = ''' + - external_port: external port + - vlan_ids: vlan list +''' + +CMD_DEBUG_FNAME = '/tmp/vlan_port.cmds.txt' +EXCEPTION_DEBUG_FNAME = '/tmp/vlan_port.exception.txt' + 
+class VlanPort(object): + def __init__(self, external_port, vlan_ids): + self.external_port = external_port + self.vlan_ids = vlan_ids + self.host_ifaces = VlanPort.ifconfig('ifconfig -a') + + return + + def up_external_port(self): + if self.external_port in self.host_ifaces: + VlanPort.iface_up(self.external_port) + + return + + def create_vlan_port(self, port, vlan_id): + vlan_port = "%s.%d" % (port, vlan_id) + if vlan_port not in self.host_ifaces: + VlanPort.cmd('vconfig add %s %d' % (port, vlan_id)) + + VlanPort.iface_up(vlan_port) + + return + + def destroy_vlan_port(self, vlan_port): + if vlan_port in self.host_ifaces: + VlanPort.iface_down(vlan_port) + VlanPort.cmd('vconfig rem %s' % vlan_port) + + return + + def create_vlan_ports(self): + for vlan_id in self.vlan_ids: + self.create_vlan_port(self.external_port, vlan_id) + + def remove_vlan_ports(self): + for vlan_id in self.vlan_ids: + vlan_port = "%s.%d" % (self.external_port, vlan_id) + self.destroy_vlan_port(vlan_port) + + @staticmethod + def ifconfig(cmdline): + out = VlanPort.cmd(cmdline) + + ifaces = set() + + rows = out.split('\n') + for row in rows: + if len(row) == 0: + continue + terms = row.split() + if not row[0].isspace(): + ifaces.add(terms[0].rstrip(':')) + + return ifaces + + @staticmethod + def iface_up(iface_name, pid=None): + return VlanPort.iface_updown(iface_name, 'up', pid) + + @staticmethod + def iface_down(iface_name, pid=None): + return VlanPort.iface_updown(iface_name, 'down', pid) + + @staticmethod + def iface_updown(iface_name, state, pid): + if pid is None: + return VlanPort.cmd('ip link set %s %s' % (iface_name, state)) + else: + return VlanPort.cmd('nsenter -t %s -n ip link set %s %s' % (pid, iface_name, state)) + + @staticmethod + def cmd(cmdline): + with open(CMD_DEBUG_FNAME, 'a') as fp: + pprint("CMD: %s" % cmdline, fp) + cmd = cmdline.split(' ') + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE) + stdout, stderr = 
process.communicate() + ret_code = process.returncode + + if ret_code != 0: + raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline)) + + with open(CMD_DEBUG_FNAME, 'a') as fp: + pprint("OUTPUT: %s" % stdout, fp) + + return stdout + +def main(): + + module = AnsibleModule(argument_spec=dict( + cmd=dict(required=True, choices=['create', 'remove', 'list']), + external_port = dict(required=True, type='str'), + vlan_ids=dict(required=True, type='list'), + )) + + cmd = module.params['cmd'] + external_port = module.params['external_port'] + vlan_ids = module.params['vlan_ids'] + vlan_ids.sort() + + fp_ports = [] + + vp = VlanPort(external_port, vlan_ids) + + vp.up_external_port() + if cmd == "create": + vp.create_vlan_ports() + elif cmd == "remove": + vp.remove_vlan_ports() + + for vlan_id in vlan_ids: + fp_ports.append("%s.%d" % (external_port, vlan_id)) + + module.exit_json(changed=False, ansible_facts={'dut_fp_ports': fp_ports}) + +if __name__ == "__main__": + main() diff --git a/ansible/roles/vm_set/library/vm_topology.py b/ansible/roles/vm_set/library/vm_topology.py index 6cd154a1157..d8e8af640cc 100644 --- a/ansible/roles/vm_set/library/vm_topology.py +++ b/ansible/roles/vm_set/library/vm_topology.py @@ -28,7 +28,8 @@ - inserts physical vlans into the docker container to represent endhosts - binds internal interfaces of the docker container to correspoinding VM ports - connects interfaces "Ethernet9" of every VM in current vm set to each other - - connect vlan interface to bridges representing vm set fp ports + - connect dut fp ports to bridges representing vm set fp ports + - connect dut mgmt ports to mgmt bridge (option) - with cmd: 'renumber' the module: - disconnect vlan interface to bridges representing vm set fp ports - inserts mgmt interface inside of the docker container with name "ptf_{{vm_set_name}}" @@ -49,11 +50,11 @@ - topo: dictionary with VMs topology. 
Check vars/topo_*.yml for details - vm_names: list of VMs represented on a current host - vm_base: which VM consider the first VM in the current vm set - - vlan_base: the first vlan for the network - - mgmt_ip_addr: ip address with prefixlen for the injected docker container - - mgmt_ip_gw: default gateway for the injected docker container + - ptf_mgmt_ip_addr: ip address with prefixlen for the injected docker container + - ptf_mgmt_ip_gw: default gateway for the injected docker container - mgmt_bridge: a bridge which is used as mgmt bridge on the host - - ext_iface: physical interface which will be used for for vlan creation + - dut_fp_ports: dut ports + - dut_mgmt_port: dut mgmt port - fp_mtu: MTU for FP ports ''' @@ -64,18 +65,18 @@ vm_names: "{{ VM_hosts }}" fp_mtu: "{{ fp_mtu_size }}" -- name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} vm_topology: cmd: "bind" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - mgmt_ip_addr: "{{ ptf_ip }}" - mgmt_ip_gw: "{{ mgmt_gw }}" + ptf_mgmt_ip_addr: "{{ ptf_ip }}" + ptf_mgmt_ip_gw: "{{ mgmt_gw }}" mgmt_bridge: "{{ mgmt_bridge }}" - ext_iface: "{{ external_iface }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" + dut_fp_ports: "{{ dut_fp_ports }}" fp_mtu: "{{ fp_mtu_size }}" max_fp_num: "{{ max_fp_num }} ''' @@ -113,7 +114,7 @@ def __init__(self, vm_names, fp_mtu, max_fp_num): return - def init(self, vm_set_name, topo, vm_base, vlan_base, ext_iface, ptf_exists=True): + def init(self, vm_set_name, topo, vm_base, dut_fp_ports, ptf_exists=True): self.vm_set_name = vm_set_name if 'VMs' in topo: self.VMs = topo['VMs'] @@ -134,8 +135,7 @@ def init(self, vm_set_name, topo, vm_base, vlan_base, ext_iface, ptf_exists=True else: self.host_interfaces = [] - self.vlan_base = vlan_base - self.ext_iface = ext_iface + self.dut_fp_ports = dut_fp_ports 
self.injected_fp_ports = self.extract_vm_vlans() @@ -152,9 +152,9 @@ def update(self): self.host_br_to_ifs, self.host_if_to_br = VMTopology.brctl('brctl show') self.host_ifaces = VMTopology.ifconfig('ifconfig -a') if self.pid is not None: - self.ctr_ifaces = VMTopology.ifconfig('nsenter -t %s -n ifconfig -a' % self.pid) + self.cntr_ifaces = VMTopology.ifconfig('nsenter -t %s -n ifconfig -a' % self.pid) else: - self.ctr_ifaces = None + self.cntr_ifaces = [] return @@ -167,22 +167,22 @@ def extract_vm_vlans(self): def create_bridges(self): for vm in self.vm_names: - for vlan_num in xrange(self.max_fp_num): - vlan_br_name = OVS_FP_BRIDGE_TEMPLATE % (vm, vlan_num) - self.create_bridge(vlan_br_name) - port1_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm - self.create_bridge(port1_br_name) + for fp_num in xrange(self.max_fp_num): + fp_br_name = OVS_FP_BRIDGE_TEMPLATE % (vm, fp_num) + self.create_bridge(fp_br_name, self.fp_mtu) + bport_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm + self.create_bridge(bport_br_name, self.fp_mtu) return - def create_bridge(self, vlan_name): - if vlan_name not in self.host_ifaces: - VMTopology.cmd('ovs-vsctl add-br %s' % vlan_name) + def create_bridge(self, bridge_name, mtu): + if bridge_name not in self.host_ifaces: + VMTopology.cmd('ovs-vsctl add-br %s' % bridge_name) - if self.fp_mtu != DEFAULT_MTU: - VMTopology.cmd('ifconfig %s mtu %d' % (vlan_name, self.fp_mtu)) + if mtu != DEFAULT_MTU: + VMTopology.cmd('ifconfig %s mtu %d' % (bridge_name, mtu)) - VMTopology.cmd('ifconfig %s up' % vlan_name) + VMTopology.cmd('ifconfig %s up' % bridge_name) return @@ -191,8 +191,8 @@ def destroy_bridges(self): for ifname in self.host_ifaces: if re.compile(OVS_FP_BRIDGE_REGEX % vm).match(ifname): self.destroy_bridge(ifname) - port1_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm - self.destroy_bridge(port1_br_name) + bport_br_name = OVS_BRIDGE_BACK_TEMPLATE = 'br-%s-back' % vm + self.destroy_bridge(bport_br_name) return @@ -237,7 
+237,7 @@ def add_br_if_to_docker(self, bridge, ext_if, int_if): VMTopology.iface_up(ext_if) self.update() - if int_if in self.host_ifaces and int_if not in self.ctr_ifaces: + if int_if in self.host_ifaces and int_if not in self.cntr_ifaces: VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, int_if)) VMTopology.iface_up(int_if, self.pid) @@ -246,31 +246,45 @@ def add_br_if_to_docker(self, bridge, ext_if, int_if): def add_ip_to_docker_if(self, int_if, mgmt_ip_addr, mgmt_gw): self.update() - if int_if in self.ctr_ifaces: + if int_if in self.cntr_ifaces: VMTopology.cmd("nsenter -t %s -n ip addr flush dev %s" % (self.pid, int_if)) VMTopology.cmd("nsenter -t %s -n ip addr add %s dev %s" % (self.pid, mgmt_ip_addr, int_if)) VMTopology.cmd("nsenter -t %s -n ip route add default via %s dev %s" % (self.pid, mgmt_gw, int_if)) return - def add_phy_if_to_docker(self, iface_name, vlan): - int_if = "%s.%d" % (self.ext_iface, vlan) - - if int_if not in self.host_ifaces and iface_name not in self.ctr_ifaces and int_if not in self.ctr_ifaces: - VMTopology.cmd("vconfig add %s %s" % (self.ext_iface, vlan)) + def add_dut_if_to_docker(self, iface_name, dut_iface): self.update() - if int_if in self.host_ifaces and int_if not in self.ctr_ifaces and iface_name not in self.ctr_ifaces: - VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, int_if)) + if dut_iface in self.host_ifaces and dut_iface not in self.cntr_ifaces and iface_name not in self.cntr_ifaces: + VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, dut_iface)) self.update() - if int_if in self.ctr_ifaces and iface_name not in self.ctr_ifaces: - VMTopology.cmd("nsenter -t %s -n ip link set dev %s name %s" % (self.pid, int_if, iface_name)) + if dut_iface in self.cntr_ifaces and iface_name not in self.cntr_ifaces: + VMTopology.cmd("nsenter -t %s -n ip link set dev %s name %s" % (self.pid, dut_iface, iface_name)) VMTopology.iface_up(iface_name, self.pid) return + def remove_dut_if_from_docker(self, iface_name, 
dut_iface): + + if self.pid is None: + return + + self.update() + if iface_name in self.cntr_ifaces: + VMTopology.iface_down(iface_name, self.pid) + + if iface_name in self.cntr_ifaces and dut_iface not in self.cntr_ifaces: + VMTopology.cmd("nsenter -t %s -n ip link set dev %s name %s" % (self.pid, iface_name, dut_iface)) + + self.update() + if dut_iface not in self.host_ifaces and dut_iface in self.cntr_ifaces: + VMTopology.cmd("nsenter -t %s -n ip link set netns 1 dev %s" % (self.pid, dut_iface)) + + return + def add_veth_if_to_docker(self, ext_if, int_if): self.update() @@ -284,56 +298,55 @@ def add_veth_if_to_docker(self, ext_if, int_if): VMTopology.cmd("ip link set dev %s mtu %d" % (ext_if, self.fp_mtu)) if t_int_if in self.host_ifaces: VMTopology.cmd("ip link set dev %s mtu %d" % (t_int_if, self.fp_mtu)) - elif t_int_if in self.ctr_ifaces: + elif t_int_if in self.cntr_ifaces: VMTopology.cmd("nsenter -t %s -n ip link set dev %s mtu %d" % (self.pid, t_int_if, self.fp_mtu)) - elif int_if in self.ctr_ifaces: + elif int_if in self.cntr_ifaces: VMTopology.cmd("nsenter -t %s -n ip link set dev %s mtu %d" % (self.pid, int_if, self.fp_mtu)) VMTopology.iface_up(ext_if) self.update() - if t_int_if in self.host_ifaces and t_int_if not in self.ctr_ifaces and int_if not in self.ctr_ifaces: + if t_int_if in self.host_ifaces and t_int_if not in self.cntr_ifaces and int_if not in self.cntr_ifaces: VMTopology.cmd("ip link set netns %s dev %s" % (self.pid, t_int_if)) self.update() - if t_int_if in self.ctr_ifaces and int_if not in self.ctr_ifaces: + if t_int_if in self.cntr_ifaces and int_if not in self.cntr_ifaces: VMTopology.cmd("nsenter -t %s -n ip link set dev %s name %s" % (self.pid, t_int_if, int_if)) VMTopology.iface_up(int_if, self.pid) return - def up_ext_iface(self): - if self.ext_iface in self.host_interfaces: - VMTopology.iface_up(self.ext_iface) + def bind_mgmt_port(self, br_name, mgmt_port): + if mgmt_port not in self.host_if_to_br: + VMTopology.cmd("brctl addif 
%s %s" % (br_name, mgmt_port)) + + return + + def unbind_mgmt_port(self, mgmt_port): + if mgmt_port in self.host_if_to_br: + VMTopology.cmd("brctl delif %s %s" % (self.host_if_to_br[mgmt_port], mgmt_port)) return def bind_fp_ports(self, disconnect_vm=False): for attr in self.VMs.itervalues(): for vlan_num, vlan in enumerate(attr['vlans']): - vlan_id = self.vlan_base + vlan - vlan_iface = "%s.%d" % (self.ext_iface, vlan_id) injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_name, vlan) - port0_bridge = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - vm_tap = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - self.create_phys_vlan(vlan_iface, vlan_id) - self.bind_phys_vlan(port0_bridge, vlan_iface, injected_iface, vm_tap, disconnect_vm) + br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + self.bind_ovs_ports(br_name, self.dut_fp_ports[vlan], injected_iface, vm_iface, disconnect_vm) return def unbind_fp_ports(self): for attr in self.VMs.itervalues(): for vlan_num, vlan in enumerate(attr['vlans']): - vlan_id = self.vlan_base + vlan - vlan_iface = "%s.%d" % (self.ext_iface, vlan_id) - injected_iface = INJECTED_INTERFACES_TEMPLATE % (self.vm_set_name, vlan) - port0_bridge = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) - self.unbind_phys_vlan(port0_bridge, injected_iface) - self.unbind_phys_vlan(port0_bridge, vlan_iface) - self.destroy_phys_vlan(vlan_iface) + br_name = OVS_FP_BRIDGE_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + vm_iface = OVS_FP_TAP_TEMPLATE % (self.vm_names[self.vm_base_index + attr['vm_offset']], vlan_num) + self.unbind_ovs_ports(br_name, vm_iface) return @@ -380,7 +393,7 @@ def unbind_vm_backplane(self): back_int_name = 
BACK_ROOT_END_IF_TEMPLATE % vm_name vm_int_name = BACK_VM_END_IF_TEMPLATE % vm_name - self.unbind_phys_vlan(br_name, vm_int_name) + self.unbind_ovs_port(br_name, vm_int_name) if back_int_name in self.host_ifaces: VMTopology.iface_down(back_int_name) @@ -388,26 +401,18 @@ def unbind_vm_backplane(self): return - - def create_phys_vlan(self, vlan_iface, vlan_id): - if vlan_iface not in self.host_ifaces: - VMTopology.cmd('vconfig add %s %d' % (self.ext_iface, vlan_id)) - - VMTopology.iface_up(vlan_iface) - - return - - def bind_phys_vlan(self, br_name, vlan_iface, injected_iface, vm_iface, disconnect_vm=False): + def bind_ovs_ports(self, br_name, dut_iface, injected_iface, vm_iface, disconnect_vm=False): + """bind dut/injected/vm ports under an ovs bridge""" ports = VMTopology.get_ovs_br_ports(br_name) if injected_iface not in ports: VMTopology.cmd('ovs-vsctl add-port %s %s' % (br_name, injected_iface)) - if vlan_iface not in ports: - VMTopology.cmd('ovs-vsctl add-port %s %s' % (br_name, vlan_iface)) + if dut_iface not in ports: + VMTopology.cmd('ovs-vsctl add-port %s %s' % (br_name, dut_iface)) - bindings = VMTopology.get_ovs_port_bindings(br_name, vlan_iface) - vlan_iface_id = bindings[vlan_iface] + bindings = VMTopology.get_ovs_port_bindings(br_name, dut_iface) + dut_iface_id = bindings[dut_iface] injected_iface_id = bindings[injected_iface] vm_iface_id = bindings[vm_iface] @@ -419,41 +424,52 @@ def bind_phys_vlan(self, br_name, vlan_iface, injected_iface, vm_iface, disconne VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=drop" % (br_name, vm_iface_id)) else: # Add flow from a VM to an external iface - VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s" % (br_name, vm_iface_id, vlan_iface_id)) + VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s" % (br_name, vm_iface_id, dut_iface_id)) if disconnect_vm: # Add flow from external iface to ptf container - VMTopology.cmd("ovs-ofctl add-flow %s 
table=0,in_port=%s,action=output:%s" % (br_name, vlan_iface_id, injected_iface_id)) + VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s" % (br_name, dut_iface_id, injected_iface_id)) else: # Add flow from external iface to a VM and a ptf container - VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s,%s" % (br_name, vlan_iface_id, vm_iface_id, injected_iface_id)) + VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s,%s" % (br_name, dut_iface_id, vm_iface_id, injected_iface_id)) # Add flow from a ptf container to an external iface - VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s" % (br_name, injected_iface_id, vlan_iface_id)) + VMTopology.cmd("ovs-ofctl add-flow %s table=0,in_port=%s,action=output:%s" % (br_name, injected_iface_id, dut_iface_id)) return - def unbind_phys_vlan(self, br_name, vlan_iface): + def unbind_ovs_ports(self, br_name, vm_port): + """unbind all ports except the vm port from an ovs bridge""" ports = VMTopology.get_ovs_br_ports(br_name) - if vlan_iface in ports: - VMTopology.cmd('ovs-vsctl del-port %s %s' % (br_name, vlan_iface)) + for port in ports: + if port != vm_port: + VMTopology.cmd('ovs-vsctl del-port %s %s' % (br_name, port)) + + return + + def unbind_ovs_port(self, br_name, port): + """unbind a port from an ovs bridge""" + ports = VMTopology.get_ovs_br_ports(br_name) + + if port in ports: + VMTopology.cmd('ovs-vsctl del-port %s %s' % (br_name, port)) return def inject_host_ports(self): + """inject dut port into the ptf docker""" self.update() for vlan in self.host_interfaces: - self.add_phy_if_to_docker(PTF_FP_IFACE_TEMPLATE % vlan, self.vlan_base + vlan) + self.add_dut_if_to_docker(PTF_FP_IFACE_TEMPLATE % vlan, self.dut_fp_ports[vlan]) return - def destroy_phys_vlan(self, vlan_iface): - if vlan_iface in self.host_ifaces: - VMTopology.iface_down(vlan_iface) - VMTopology.cmd('vconfig rem %s' % vlan_iface) - - return + def 
deject_host_ports(self): + """deject dut port from the ptf docker""" + self.update() + for vlan in self.host_interfaces: + self.remove_dut_if_from_docker(PTF_FP_IFACE_TEMPLATE % vlan, self.dut_fp_ports[vlan]) @staticmethod def iface_up(iface_name, pid=None): @@ -489,8 +505,11 @@ def cmd(cmdline): @staticmethod def get_ovs_br_ports(bridge): out = VMTopology.cmd('ovs-vsctl list-ports %s' % bridge) - return set(out.split('\n')) - + ports = set() + for port in out.split('\n'): + if port != "": + ports.add(port) + return ports @staticmethod def get_ovs_port_bindings(bridge, vlan_iface = None): @@ -532,7 +551,10 @@ def ifconfig(cmdline): @staticmethod def get_pid(ptf_name): cli = Client(base_url='unix://var/run/docker.sock') - result = cli.inspect_container(ptf_name) + try: + result = cli.inspect_container(ptf_name) + except: + return None return result['State']['Pid'] @@ -561,21 +583,6 @@ def brctl(cmdline): return br_to_ifs, if_to_br - def find_base_vlan(self): - vlan_base = 0 - for attr in self.VMs.itervalues(): - vm_name = self.vm_names[self.vm_base_index + attr['vm_offset']] - if len(attr['vlans']) > 0: - br_name = OVS_FP_BRIDGE_TEMPLATE % (vm_name, 0) - out = VMTopology.cmd('ovs-vsctl list-ports %s' % br_name) - rows = out.split('\n') - for row in rows: - if row.startswith(self.ext_iface): - extracted_vlan = int(row[len(self.ext_iface)+1:]) - return extracted_vlan - attr['vlans'][0] - - raise Exception("Can't find previous vlan_base") - def check_topo(topo): hostif_exists = False vms_exists = False @@ -637,11 +644,11 @@ def main(): topo=dict(required=False, type='dict'), vm_names=dict(required=True, type='list'), vm_base=dict(required=False, type='str'), - vlan_base=dict(required=False, type='int'), - mgmt_ip_addr=dict(required=False, type='str'), - mgmt_ip_gw=dict(required=False, type='str'), + ptf_mgmt_ip_addr=dict(required=False, type='str'), + ptf_mgmt_ip_gw=dict(required=False, type='str'), mgmt_bridge=dict(required=False, type='str'), - 
ext_iface=dict(required=False, type='str'), + dut_fp_ports=dict(required=False, type='list'), + dut_mgmt_port=dict(required=False, type='str'), fp_mtu=dict(required=False, type='int', default=DEFAULT_MTU), max_fp_num=dict(required=False, type='int', default=NUM_FP_VLANS_PER_FP), ), @@ -651,6 +658,7 @@ def main(): vm_names = module.params['vm_names'] fp_mtu = module.params['fp_mtu'] max_fp_num = module.params['max_fp_num'] + dut_mgmt_port = None try: if os.path.exists(CMD_DEBUG_FNAME) and os.path.isfile(CMD_DEBUG_FNAME): @@ -665,15 +673,14 @@ def main(): elif cmd == 'bind': check_params(module, ['vm_set_name', 'topo', - 'mgmt_ip_addr', - 'mgmt_ip_gw', + 'ptf_mgmt_ip_addr', + 'ptf_mgmt_ip_gw', 'mgmt_bridge', - 'ext_iface'], cmd) + 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] - ext_iface = module.params['ext_iface'] - vlan_base = module.params['vlan_base'] + dut_fp_ports = module.params['dut_fp_ports'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) @@ -681,22 +688,23 @@ def main(): hostif_exists, vms_exists = check_topo(topo) if vms_exists: - check_params(module, ['vm_base', 'vlan_base'], cmd) + check_params(module, ['vm_base'], cmd) vm_base = module.params['vm_base'] else: vm_base = None - net.init(vm_set_name, topo, vm_base, vlan_base, ext_iface) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) - mgmt_ip_addr = module.params['mgmt_ip_addr'] - mgmt_ip_gw = module.params['mgmt_ip_gw'] + ptf_mgmt_ip_addr = module.params['ptf_mgmt_ip_addr'] + ptf_mgmt_ip_gw = module.params['ptf_mgmt_ip_gw'] mgmt_bridge = module.params['mgmt_bridge'] - net.add_mgmt_port_to_docker(mgmt_bridge, mgmt_ip_addr, mgmt_ip_gw) - net.up_ext_iface() + net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, ptf_mgmt_ip_gw) if vms_exists: net.add_veth_ports_to_docker() + if module.params['dut_mgmt_port']: + 
net.bind_mgmt_port(mgmt_bridge, module.params['dut_mgmt_port']) net.bind_fp_ports() net.bind_vm_backplane() @@ -705,41 +713,44 @@ def main(): elif cmd == 'unbind': check_params(module, ['vm_set_name', 'topo', - 'ext_iface'], cmd) + 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] - ext_iface = module.params['ext_iface'] - vlan_base = module.params['vlan_base'] + dut_fp_ports = module.params['dut_fp_ports'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) - _, vms_exists = check_topo(topo) + hostif_exists, vms_exists = check_topo(topo) if vms_exists: - check_params(module, ['vm_base', 'vlan_base'], cmd) + check_params(module, ['vm_base'], cmd) vm_base = module.params['vm_base'] else: vm_base = None - net.init(vm_set_name, topo, vm_base, vlan_base, ext_iface, False) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) if vms_exists: + if module.params['dut_mgmt_port']: + net.unbind_mgmt_port(module.params['dut_mgmt_port']) net.unbind_vm_backplane() net.unbind_fp_ports() + + if hostif_exists: + net.deject_host_ports() elif cmd == 'renumber': check_params(module, ['vm_set_name', 'topo', - 'mgmt_ip_addr', - 'mgmt_ip_gw', + 'ptf_mgmt_ip_addr', + 'ptf_mgmt_ip_gw', 'mgmt_bridge', - 'ext_iface'], cmd) + 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] - ext_iface = module.params['ext_iface'] - vlan_base = module.params['vlan_base'] + dut_fp_ports = module.params['dut_fp_ports'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) @@ -747,25 +758,21 @@ def main(): hostif_exists, vms_exists = check_topo(topo) if vms_exists: - check_params(module, ['vm_base', 'vlan_base'], cmd) + check_params(module, ['vm_base'], cmd) vm_base = 
module.params['vm_base'] else: vm_base = None - net.init(vm_set_name, topo, vm_base, vlan_base, ext_iface, True) + net.init(vm_set_name, topo, vm_base, dut_fp_ports, True) - mgmt_ip_addr = module.params['mgmt_ip_addr'] - mgmt_ip_gw = module.params['mgmt_ip_gw'] + ptf_mgmt_ip_addr = module.params['ptf_mgmt_ip_addr'] + ptf_mgmt_ip_gw = module.params['ptf_mgmt_ip_gw'] mgmt_bridge = module.params['mgmt_bridge'] - net.add_mgmt_port_to_docker(mgmt_bridge, mgmt_ip_addr, mgmt_ip_gw) + net.add_mgmt_port_to_docker(mgmt_bridge, ptf_mgmt_ip_addr, ptf_mgmt_ip_gw) if vms_exists: - new_vlan_base = net.vlan_base - net.vlan_base = net.find_base_vlan() # Use old vlan base to remove previous vlan net.unbind_fp_ports() - net.vlan_base = new_vlan_base - # self.vlan_base = restore new one net.add_veth_ports_to_docker() net.bind_fp_ports() if hostif_exists: @@ -773,12 +780,11 @@ def main(): elif cmd == 'connect-vms' or cmd == 'disconnect-vms': check_params(module, ['vm_set_name', 'topo', - 'ext_iface'], cmd) + 'dut_fp_ports'], cmd) vm_set_name = module.params['vm_set_name'] topo = module.params['topo'] - ext_iface = module.params['ext_iface'] - vlan_base = module.params['vlan_base'] + dut_fp_ports = module.params['dut_fp_ports'] if len(vm_set_name) > VM_SET_NAME_MAX_LEN: raise Exception("vm_set_name can't be longer than %d characters: %s (%d)" % (VM_SET_NAME_MAX_LEN, vm_set_name, len(vm_set_name))) @@ -786,12 +792,12 @@ def main(): hostif_exists, vms_exists = check_topo(topo) if vms_exists: - check_params(module, ['vm_base', 'vlan_base'], cmd) + check_params(module, ['vm_base'], cmd) vm_base = module.params['vm_base'] else: vm_base = None - net.init(vm_set_name, topo, vm_base, vlan_base, ext_iface) + net.init(vm_set_name, topo, vm_base, dut_fp_ports) if vms_exists: if cmd == 'connect-vms': diff --git a/ansible/roles/vm_set/tasks/add_topo.yml b/ansible/roles/vm_set/tasks/add_topo.yml index 5fd2d0786ec..292ca5cff4b 100644 --- a/ansible/roles/vm_set/tasks/add_topo.yml +++ 
b/ansible/roles/vm_set/tasks/add_topo.yml @@ -17,18 +17,29 @@ command: docker exec -i ptf_{{ vm_set_name }} sysctl -w net.ipv6.conf.all.disable_ipv6=0 become: yes -- name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml + +- name: Setup vlan port for vlan tunnel + vlan_port: + external_port: "{{ external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "create" + become: yes + when: external_port is defined + +- name: Bind topology {{ topo }} to VMs. base vm = {{ VM_base }} vm_topology: cmd: "bind" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - mgmt_ip_addr: "{{ ptf_ip }}" - mgmt_ip_gw: "{{ mgmt_gw }}" + ptf_mgmt_ip_addr: "{{ ptf_ip }}" + ptf_mgmt_ip_gw: "{{ mgmt_gw }}" mgmt_bridge: "{{ mgmt_bridge }}" - ext_iface: "{{ external_iface }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" fp_mtu: "{{ fp_mtu_size }}" max_fp_num: "{{ max_fp_num }}" become: yes diff --git a/ansible/roles/vm_set/tasks/connect_vms.yml b/ansible/roles/vm_set/tasks/connect_vms.yml index 9c29224c23e..00c62bc9448 100644 --- a/ansible/roles/vm_set/tasks/connect_vms.yml +++ b/ansible/roles/vm_set/tasks/connect_vms.yml @@ -1,12 +1,15 @@ -- name: Connect VMs to {{ topo }}. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml + +- name: Connect VMs to {{ topo }}. 
base vm = {{ VM_base }} vm_topology: cmd: "connect-vms" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - ext_iface: "{{ external_iface }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" fp_mtu: "{{ fp_mtu_size }}" max_fp_num: "{{ max_fp_num }}" become: yes diff --git a/ansible/roles/vm_set/tasks/disconnect_vms.yml b/ansible/roles/vm_set/tasks/disconnect_vms.yml index 55301f4c09c..4574eaa1ec4 100644 --- a/ansible/roles/vm_set/tasks/disconnect_vms.yml +++ b/ansible/roles/vm_set/tasks/disconnect_vms.yml @@ -1,11 +1,14 @@ -- name: Disconnect VMs to {{ topo }}. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml + +- name: Disconnect VMs to {{ topo }}. base vm = {{ VM_base }} vm_topology: cmd: "disconnect-vms" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - ext_iface: "{{ external_iface }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" max_fp_num: "{{ max_fp_num }}" become: yes diff --git a/ansible/roles/vm_set/tasks/docker.yml b/ansible/roles/vm_set/tasks/docker.yml new file mode 100644 index 00000000000..bec6ba2bd6d --- /dev/null +++ b/ansible/roles/vm_set/tasks/docker.yml @@ -0,0 +1,37 @@ +- name: Add docker repository for 16.04 + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable + state: present + become: yes + when: host_distribution_version.stdout == "16.04" + +- name: Add docker repository for 17.04 + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu zesty stable + state: present + become: yes + when: host_distribution_version == "17.04" + +- name: Add docker repository for 18.04 + apt_repository: + repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable + 
state: present + become: yes + when: host_distribution_version.stdout == "18.04" + +- name: Add docker official GPG key + apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + become: yes + environment: "{{ proxy_env | default({}) }}" + +- name: Install docker-ce + apt: pkg=docker-ce update_cache=yes + become: yes + environment: "{{ proxy_env | default({}) }}" + +- name: Install python packages + pip: name=docker-py state=present version=1.7.2 + become: yes + environment: "{{ proxy_env | default({}) }}" diff --git a/ansible/roles/vm_set/tasks/external_port.yml b/ansible/roles/vm_set/tasks/external_port.yml new file mode 100644 index 00000000000..9357b0b6a01 --- /dev/null +++ b/ansible/roles/vm_set/tasks/external_port.yml @@ -0,0 +1,10 @@ +- name: setup external interface as trunk port + template: src=external_port.j2 + dest=/etc/network/interfaces.d/external_port + become: yes + register: external_port_cfg + +- name: bring up external port + shell: /sbin/ifup {{ external_port }} --force + become: yes + when: external_port_cfg.changed diff --git a/ansible/roles/vm_set/tasks/internal_mgmt_network.yml b/ansible/roles/vm_set/tasks/internal_mgmt_network.yml new file mode 100644 index 00000000000..76ae0facfef --- /dev/null +++ b/ansible/roles/vm_set/tasks/internal_mgmt_network.yml @@ -0,0 +1,8 @@ +- name: create management bridge + shell: brctl add {{ mgmt_bridge }} + become: yes + ignore_errors: yes + +- name: bring up external port + shell: /sbin/ifconfig {{ mgmt_bridge }} up + become: yes diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 936aabd873d..22b502b8881 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -2,7 +2,7 @@ # Input parameters for the role: # - action: 'start', 'stop' or 'renumber' for creating, removeing, or renumbering vm set respectively # - id: sequence number for vm set on the host. 
-# - external_iface: interface which will be used as parent for vlan interface creation +# - external_port: interface which will be used as parent for vlan interface creation # - vlan_base: first vlan id for the VMs # - VMs: a dictionary which contains hostnames of VMs as a key and a dictionary with parameters (num, memory, mgmt_ip) for every VM. # - topology: a dictionary which contains hostnames of VMs as a key and vlans value which define a topology (numbers of connected ports for every VM) @@ -18,14 +18,17 @@ - name: get host distribution shell: grep ^NAME /etc/os-release | awk -F '=' '{print $2}' | tr -d '"' register: host_distribution + changed_when: False - name: get host distribution version shell: grep ^VERSION_ID /etc/os-release | awk -F '=' '{print $2}' | tr -d '"' register: host_distribution_version + changed_when: False - name: get host kernel version shell: uname -r register: host_kernel + changed_when: False - name: Check if kernel upgrade needed set_fact: @@ -51,20 +54,6 @@ msg: "Kernel upgraded, need to reboot!" 
when: kernel_upgrade_needed is defined -- name: Add docker repository for 17.04 - apt_repository: - repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu zesty stable - state: present - become: yes - when: host_distribution_version == "17.04" - -- name: Add docker repository for 16.04 - apt_repository: - repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable - state: present - become: yes - when: host_distribution_version == "16.04" - - name: Install necessary packages apt: pkg={{ item }} update_cache=yes cache_valid_time=86400 become: yes @@ -83,27 +72,53 @@ - ca-certificates - curl - software-properties-common - - docker-ce -- name: Install python packages - pip: name=docker-py state=present version=1.7.2 +- include: docker.yml + +- name: Ensure {{ ansible_user }} in docker,sudo group + user: + name: "{{ ansible_user }}" + groups: docker,sudo become: yes - environment: "{{ proxy_env | default({}) }}" - name: Install br_netfilter kernel module become: yes modprobe: name=br_netfilter state=present -- name: setup trunk port - template: src=trunk_port.j2 - dest=/etc/network/interfaces.d/trunk_port +- name: Set sysctl bridge parameters for testbed + sysctl: + name: "{{ item }}" + value: 0 + sysctl_set: yes become: yes - register: trunk_port - -- name: bring up trunk port - shell: /sbin/ifup {{ external_iface }} --force + with_items: + - net.bridge.bridge-nf-call-arptables + - net.bridge.bridge-nf-call-ip6tables + - net.bridge.bridge-nf-call-iptables + +- name: Set sysctl RCVBUF parameter for testbed + sysctl: + name: "net.core.rmem_max" + value: 509430500 + sysctl_set: yes become: yes - when: trunk_port.changed + +- name: Setup external front port + include: external_port.yml + when: external_port is defined + +- name: Setup internal management network + include: internal_mgmt_network.yml + when: internal_mgmt_network is defined and internal_mgmt_network == True + +- block: + - getent: + database: passwd + key: "{{ ansible_user }}" + 
split: ":" + - set_fact: + home_path: "{{ getent_passwd[ansible_user][4] }}" + - debug: msg="{{ home_path }}" - name: Ensure {{ root_path }} exists file: path={{ root_path }} state=directory @@ -123,12 +138,14 @@ virt: command=list_vms uri=qemu:///system register: vm_list_defined + become: true - name: Retrieve a list of the running VMs virt: command=list_vms uri=qemu:///system state=running register: vm_list_running + become: true - name: Find current server group set_fact: current_server={{ group_names | extract_by_prefix('server_') }} @@ -163,3 +180,11 @@ - name: Disconnect VMs include: disconnect_vms.yml when: action == 'disconnect_vms' + +- name: Start SONiC VM + include: start_sonic_vm.yml + when: action == 'start_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' + +- name: Stop SONiC VM + include: stop_sonic_vm.yml + when: action == 'stop_sonic_vm' and hostvars[dut_name].type is defined and hostvars[dut_name]['type'] == 'kvm' diff --git a/ansible/roles/vm_set/tasks/remove_topo.yml b/ansible/roles/vm_set/tasks/remove_topo.yml index 6d01a21d88b..9201357d588 100644 --- a/ansible/roles/vm_set/tasks/remove_topo.yml +++ b/ansible/roles/vm_set/tasks/remove_topo.yml @@ -1,18 +1,29 @@ -- name: Remove ptf docker container ptf_{{ vm_set_name }} - docker: - name: ptf_{{ vm_set_name }} - image: "{{ docker_registry_host }}/{{ ptf_imagename }}" - state: absent - become: yes +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml -- name: Unbind topology {{ topo }} to VMs. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Unbind topology {{ topo }} to VMs. 
base vm = {{ VM_base }} vm_topology: cmd: "unbind" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - ext_iface: "{{ external_iface }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" max_fp_num: "{{ max_fp_num }}" become: yes + +- name: Remove vlan port for vlan tunnel + vlan_port: + external_port: "{{ external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "remove" + become: yes + when: external_port is defined + +- name: Remove ptf docker container ptf_{{ vm_set_name }} + docker: + name: ptf_{{ vm_set_name }} + image: "{{ docker_registry_host }}/{{ ptf_imagename }}" + state: absent + become: yes diff --git a/ansible/roles/vm_set/tasks/renumber_topo.yml b/ansible/roles/vm_set/tasks/renumber_topo.yml index 28953425ca8..1f98a251ae3 100644 --- a/ansible/roles/vm_set/tasks/renumber_topo.yml +++ b/ansible/roles/vm_set/tasks/renumber_topo.yml @@ -19,18 +19,21 @@ cap_add: NET_ADMIN become: yes -- name: Renumber topology {{ topo }} to VMs. base vm = {{ VM_base }} base vlan = {{ vlan_base }} +- name: Set front panel/mgmt port for dut + include: set_dut_port.yml + +- name: Renumber topology {{ topo }} to VMs. 
base vm = {{ VM_base }} vm_topology: cmd: "renumber" vm_set_name: "{{ vm_set_name }}" topo: "{{ topology }}" vm_names: "{{ VM_hosts }}" vm_base: "{{ VM_base }}" - vlan_base: "{{ vlan_base }}" - mgmt_ip_addr: "{{ ptf_ip }}" - mgmt_ip_gw: "{{ mgmt_gw }}" + ptf_mgmt_ip_addr: "{{ ptf_ip }}" + ptf_mgmt_ip_gw: "{{ mgmt_gw }}" mgmt_bridge: "{{ mgmt_bridge }}" - ext_iface: "{{ external_iface }}" + dut_fp_ports: "{{ dut_fp_ports }}" + dut_mgmt_port: "{{ dut_mgmt_port }}" fp_mtu: "{{ fp_mtu_size }}" max_fp_num: "{{ max_fp_num }}" become: yes diff --git a/ansible/roles/vm_set/tasks/set_dut_port.yml b/ansible/roles/vm_set/tasks/set_dut_port.yml new file mode 100644 index 00000000000..df42e09b930 --- /dev/null +++ b/ansible/roles/vm_set/tasks/set_dut_port.yml @@ -0,0 +1,18 @@ +- name: Set front panel port for vlan tunnel + vlan_port: + external_port: "{{ external_port }}" + vlan_ids: "{{ device_vlan_list }}" + cmd: "list" + become: yes + when: external_port is defined + +- name: Setup mgmt port for physical dut + set_fact: + dut_mgmt_port: "" + when: external_port is defined + +- name: Get front panel and mgmt port for kvm vm + kvm_port: + vmname: "{{ dut_name }}" + when: external_port is not defined + become: yes diff --git a/ansible/roles/vm_set/tasks/start.yml b/ansible/roles/vm_set/tasks/start.yml index b763a86a8b6..defb28a469e 100644 --- a/ansible/roles/vm_set/tasks/start.yml +++ b/ansible/roles/vm_set/tasks/start.yml @@ -1,21 +1,3 @@ -- name: Set sysctl bridge parameters for testbed - sysctl: - name: "{{ item }}" - value: 0 - sysctl_set: yes - become: yes - with_items: - - net.bridge.bridge-nf-call-arptables - - net.bridge.bridge-nf-call-ip6tables - - net.bridge.bridge-nf-call-iptables - -- name: Set sysctl RCVBUF parameter for testbed - sysctl: - name: "net.core.rmem_max" - value: 509430500 - sysctl_set: yes - become: yes - - name: Create directory for vm images and vm disks file: path={{ item }} state=directory mode=0755 with_items: @@ -61,9 +43,10 @@ hostname: "{{ 
vm_name }}" mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}" serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}" - disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk" + src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}" + disk_image: "{{ home_path }}/{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk" + cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}" mgmt_tap: "{{ vm_name }}-m" port1_bridge: "br-{{ vm_name }}-back" port1_tap: "{{ vm_name }}-back" with_items: "{{ VM_hosts }}" - diff --git a/ansible/roles/vm_set/tasks/start_sonic_vm.yml b/ansible/roles/vm_set/tasks/start_sonic_vm.yml new file mode 100644 index 00000000000..b176b563b58 --- /dev/null +++ b/ansible/roles/vm_set/tasks/start_sonic_vm.yml @@ -0,0 +1,51 @@ +- name: Create directory for vm images and vm disks + file: path={{ item }} state=directory mode=0755 + with_items: + - "sonic-vm/images" + - "sonic-vm/disks" + +- set_fact: + src_disk_image: "{{ home_path }}/sonic-vm/images/sonic-vs.img" + disk_image: "{{ home_path }}/sonic-vm/disks/sonic_{{ dut_name }}.img" + mgmt_ip_address: " {{ hostvars[dut_name]['ansible_host'] }}" + serial_port: 9000 + +- name: Device debug output + debug: msg="hostname = {{ dut_name }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}" + +- name: Check destination file existance + stat: path={{ disk_image }} + register: file_stat + +- name: Copy sonic disk image for {{ dut_name }} + copy: src={{ src_disk_image }} dest={{ disk_image }} remote_src=True + when: not file_stat.stat.exists + +- name: Define vm {{ dut_name }} + virt: name={{ dut_name }} + command=define + xml="{{ lookup('template', 'templates/sonic.xml.j2') }}" + uri=qemu:///system + when: dut_name not in vm_list_defined.list_vms + become: yes + +- name: Start vm {{ dut_name }} + virt: name={{ dut_name }} + state=running + uri=qemu:///system + when: dut_name not in vm_list_running.list_vms + become: yes + +- name: Wait until vm {{ dut_name }} is 
loaded + sonic_kickstart: telnet_port={{ serial_port }} + login={{ sonic_login }} + passwords={{ sonic_passwords }} + hostname={{ dut_name }} + mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}" + mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }} + new_password={{ sonic_new_password }} + register: kickstart_output + +- name: Fail if kickstart gives error for {{ dut_name }} + fail: msg="Start sonic vm weren't succesfull" + when: kickstart_output.kickstart_code != 0 diff --git a/ansible/roles/vm_set/tasks/start_vm.yml b/ansible/roles/vm_set/tasks/start_vm.yml index 407c3dcd0cb..2e317f0c5f5 100644 --- a/ansible/roles/vm_set/tasks/start_vm.yml +++ b/ansible/roles/vm_set/tasks/start_vm.yml @@ -32,18 +32,21 @@ xml="{{ lookup('template', 'templates/arista.xml.j2') }}" uri=qemu:///system when: vm_name not in vm_list_defined.list_vms + become: yes - name: Destroy vm {{ vm_name }} if it requires fix virt: name={{ vm_name }} command=destroy uri=qemu:///system when: vm_name in respin_vms + become: yes - name: Start vm {{ vm_name }} virt: name={{ vm_name }} state=running uri=qemu:///system when: vm_name not in vm_list_running.list_vms or vm_name in respin_vms + become: yes - name: Wait until vm {{ vm_name }} is loaded kickstart: telnet_port={{ serial_port }} @@ -63,12 +66,14 @@ command=destroy uri=qemu:///system when: vm_name not in vm_list_running.list_vms and kickstart_output.kickstart_code != 0 + become: yes - name: Start vm again {{ vm_name }} virt: name={{ vm_name }} state=running uri=qemu:///system when: vm_name not in vm_list_running.list_vms and kickstart_output.kickstart_code != 0 + become: yes - name: Wait until vm {{ vm_name }} is loaded kickstart: telnet_port={{ serial_port }} diff --git a/ansible/roles/vm_set/tasks/stop_sonic_vm.yml b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml new file mode 100644 index 00000000000..f08a27711e7 --- /dev/null +++ b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml @@ -0,0 +1,19 @@ +- set_fact: + disk_image: "{{ home_path 
}}/sonic-vm/disks/sonic_{{ dut_name }}.img" + +- name: Destroy vm {{ dut_name }} + virt: name={{ dut_name }} + state=destroyed + uri=qemu:///system + when: dut_name in vm_list_running.list_vms + become: yes + +- name: Undefine vm {{ dut_name }} + virt: name={{ dut_name }} + command=undefine + uri=qemu:///system + when: dut_name in vm_list_defined.list_vms + become: yes + +- name: Copy sonic disk image for {{ dut_name }} + file: path={{ disk_image }} state=absent diff --git a/ansible/roles/vm_set/tasks/stop_vm.yml b/ansible/roles/vm_set/tasks/stop_vm.yml index 6bc01ef2dfc..4552d4af7ad 100644 --- a/ansible/roles/vm_set/tasks/stop_vm.yml +++ b/ansible/roles/vm_set/tasks/stop_vm.yml @@ -3,12 +3,14 @@ state=destroyed uri=qemu:///system when: vm_name in vm_list_running.list_vms + become: yes - name: Undefine VM {{ vm_name }} virt: name={{ vm_name }} command=undefine uri=qemu:///system when: vm_name in vm_list_defined.list_vms + become: yes - name: Remove arista disk image for {{ vm_name }} file: path={{ disk_image }} state=absent diff --git a/ansible/roles/vm_set/templates/external_port.j2 b/ansible/roles/vm_set/templates/external_port.j2 new file mode 100644 index 00000000000..f83d394c878 --- /dev/null +++ b/ansible/roles/vm_set/templates/external_port.j2 @@ -0,0 +1,5 @@ +# trunk port +auto {{ external_port }} +iface {{ external_port }} inet manual + mtu 9216 + up ip link set {{ external_port }} up diff --git a/ansible/roles/vm_set/templates/sonic.xml.j2 b/ansible/roles/vm_set/templates/sonic.xml.j2 new file mode 100644 index 00000000000..4838566d7c5 --- /dev/null +++ b/ansible/roles/vm_set/templates/sonic.xml.j2 @@ -0,0 +1,139 @@ + + {{ dut_name }} + 2048000 + 2048000 + 1 + + /machine + + + hvm + + + + + + + + destroy + restart + restart + + /usr/bin/qemu-system-x86_64 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + +
+ + + + diff --git a/ansible/roles/vm_set/templates/trunk_port.j2 b/ansible/roles/vm_set/templates/trunk_port.j2 deleted file mode 100644 index 7103f5a33ed..00000000000 --- a/ansible/roles/vm_set/templates/trunk_port.j2 +++ /dev/null @@ -1,5 +0,0 @@ -# trunk port -auto {{ external_iface }} -iface {{ external_iface }} inet manual - mtu 9216 - up ip link set {{ external_iface }} up diff --git a/ansible/roles/vm_set/vars/main.yml b/ansible/roles/vm_set/vars/main.yml index b5b80058a78..344784ecec7 100644 --- a/ansible/roles/vm_set/vars/main.yml +++ b/ansible/roles/vm_set/vars/main.yml @@ -4,8 +4,11 @@ new_login: admin new_password: 123456 new_root_password: 123456 -src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}" -cdrom_image: "{{ root_path }}/images/{{ cd_image_filename }}" +sonic_login: "admin" +sonic_passwords: + - "YourPaSsWoRd" + - "password" +sonic_new_password: "password" tor_memory: 1572864 spine_memory: 2097152 diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh index 203cc17e2f4..a1fcc699a79 100755 --- a/ansible/testbed-cli.sh +++ b/ansible/testbed-cli.sh @@ -5,27 +5,34 @@ set -e function usage { echo "testbed-cli. 
Interface to testbeds" - echo "Usage : $0 { start-vms | stop-vms } server-name vault-password-file" - echo " to fix a subset of VMs:" + echo "Usage :" + echo " $0 [options] { start-vms | stop-vms } server-name vault-password-file" + echo " $0 [options] { add-topo | remove-topo | renumber-topo | connect-topo } topo-name vault-password-file" + echo " $0 [options] { refresh-dut } topo-name vault-password-file" + echo " $0 [options] { connect-vms | disconnect-vms } topo-name vault-password-file" + echo " $0 [options] { config-vm } topo-name vm-name vault-password-file" + echo " $0 [options] { gen-mg | deploy-mg | test-mg } topo-name inventory vault-password-file" + echo + echo "Options :" + echo " -t tbfile : testbed csv file name (default testbed.csv)" + echo " -m vmfile : virtual machine file name (default veos)" + echo + echo "To start VMs on a server: $0 start-vms 'server-name' ~/.password" + echo "To restart a subset of VMs:" echo " $0 start-vms server-name vault-password-fix -e respin_vms=[vm list]" echo " vm list is separated by comma and shouldn't have space in the list." echo " e.g. 
respin_vms=[VM0310,VM0330]" - echo "Usage : $0 { add-topo | remove-topo | renumber-topo | connect-topo } topo-name vault-password-file" - echo "Usage : $0 { connect-vms | disconnect-vms } topo-name vault-password-file" - echo "Usage : $0 { config-vm } topo-name vm-name vault-password-file" - echo "Usage : $0 { gen-mg | deploy-mg | test-mg } topo-name inventory vault-password-file" - echo - echo "To start VMs on a server: $0 start-vms 'server-name' ~/.password" echo "To stop VMs on a server: $0 stop-vms 'server-name' ~/.password" echo "To deploy a topology on a server: $0 add-topo 'topo-name' ~/.password" echo "To remove a topology on a server: $0 remove-topo 'topo-name' ~/.password" echo "To renumber a topology on a server: $0 renumber-topo 'topo-name' ~/.password" , where topo-name is target topology echo "To connect a topology: $0 connect-topo 'topo-name' ~/.password" + echo "To refresh DUT in a topology: $0 refresh-dut 'topo-name' ~/.password" echo "To configure a VM on a server: $0 config-vm 'topo-name' 'vm-name' ~/.password" echo "To generate minigraph for DUT in a topology: $0 gen-mg 'topo-name' ~/.password" echo "To deploy minigraph to DUT in a topology: $0 deploy-mg 'topo-name' ~/.password" echo - echo "You should define your topology in testbed.csv file" + echo "You should define your topology in testbed csv file" echo exit } @@ -35,7 +42,7 @@ function read_file echo reading # Filter testbed names in the first column in the testbed definition file - line=$(cat testbed.csv | grep "^$1,") + line=$(cat $tbfile | grep "^$1,") if [ $? 
-ne 0 ] then @@ -46,7 +53,7 @@ function read_file NL=' ' case $line in - *"$NL"*) echo "Find more than one topology names in testbed.csv" + *"$NL"*) echo "Find more than one topology names in $tbfile" exit ;; *) echo Found topology $1 @@ -72,7 +79,7 @@ function start_vms shift echo "Starting VMs on server '${server}'" - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_start_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_start_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@ } function stop_vms @@ -83,7 +90,7 @@ function stop_vms shift echo "Stopping VMs on server '${server}'" - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@ } function add_topo @@ -96,9 +103,9 @@ function add_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_add_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ - ansible-playbook fanout_connect.yml -i veos --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ echo Done } @@ -113,7 +120,7 @@ function remove_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_remove_vm_topology.yml 
--vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_remove_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ echo Done } @@ -128,9 +135,24 @@ function renumber_topo read_file ${topology} - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_renumber_vm_topology.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ - ansible-playbook fanout_connect.yml -i veos --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="${passwd}" -e "dut=$dut" $@ + + echo Done +} + +function refresh_dut +{ + topology=$1 + passwd=$2 + shift + shift + echo "Refresh $dut in '${topology}'" + + read_file ${topology} + + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_refresh_dut.yml --vault-password-file="${passwd}" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e ptf_ip="$ptf_ip" -e topo="$topo" -e vm_set_name="$testbed_name" -e ptf_imagename="$ptf_imagename" $@ echo Done } @@ -141,7 +163,7 @@ function connect_vms read_file $1 - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos 
testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_connect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } @@ -152,7 +174,7 @@ function disconnect_vms read_file $1 - ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" + ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_disconnect_vms.yml --vault-password-file="$2" -l "$server" -e topo_name="$topo_name" -e dut_name="$dut" -e VM_base="$vm_base" -e topo="$topo" -e vm_set_name="$testbed_name" echo Done } @@ -164,7 +186,7 @@ function generate_minigraph read_file $1 - ansible-playbook -i "$2" config_sonic_basedon_testbed.yml --vault-password-file="$3" -l "$dut" -e testbed_name="$1" -v + ansible-playbook -i "$2" config_sonic_basedon_testbed.yml --vault-password-file="$3" -l "$dut" -e testbed_name="$1" -e testbed_file=$tbfile -v echo Done } @@ -175,7 +197,7 @@ function deploy_minigraph read_file $1 - ansible-playbook -i "$2" config_sonic_basedon_testbed.yml --vault-password-file="$3" -l "$dut" -e testbed_name="$1" -e deploy=true -e save=true + ansible-playbook -i "$2" config_sonic_basedon_testbed.yml --vault-password-file="$3" -l "$dut" -e testbed_name="$1" -e testbed_file=$tbfile -e deploy=true -e save=true echo Done } @@ -197,7 +219,7 @@ function config_vm read_file $1 - ansible-playbook -i veos eos.yml --vault-password-file="$3" -l "$2" -e topo="$topo" -e VM_base="$vm_base" + ansible-playbook -i $vmfile eos.yml --vault-password-file="$3" -l "$2" -e topo="$topo" -e VM_base="$vm_base" echo Done } @@ -208,12 +230,30 @@ function 
connect_topo read_file $1 - ansible-playbook fanout_connect.yml -i veos --limit "$server" --vault-password-file="$2" -e "dut=$dut" + ansible-playbook fanout_connect.yml -i $vmfile --limit "$server" --vault-password-file="$2" -e "dut=$dut" } +vmfile=veos +tbfile=testbed.csv + +while getopts "t:m:" OPTION; do + case $OPTION in + t) + tbfile=$OPTARG + ;; + m) + vmfile=$OPTARG + ;; + *) + usage + esac +done + +shift $((OPTIND-1)) + if [ $# -lt 3 ] then - usage + usage fi subcmd=$1 @@ -231,6 +271,18 @@ case "${subcmd}" in ;; connect-topo) connect_topo $@ ;; + refresh-dut) refresh_dut $@ + ;; + connect-vms) connect_vms $@ + ;; + disconnect-vms) disconnect_vms $@ + ;; + config-vm) config_vm $@ + ;; + gen-mg) generate_minigraph $@ + ;; + deploy-mg) deploy_minigraph $@ + ;; connect-vms) connect_vms $@ ;; disconnect-vms) disconnect_vms $@ diff --git a/ansible/testbed-new.yaml b/ansible/testbed-new.yaml index 28fc9e0741a..a28ce7764f3 100644 --- a/ansible/testbed-new.yaml +++ b/ansible/testbed-new.yaml @@ -218,7 +218,7 @@ host_vars: mgmt_bridge: br1 # source: sonic-mgmt/host_vars mgmt_prefixlen: 24 # source: sonic-mgmt/host_vars mgmt_gw: 10.250.0.1 # source: sonic-mgmt/host_vars - external_iface: ens3f0 # source: sonic-mgmt/host_vars + external_port: ens3f0 # source: sonic-mgmt/host_vars # veos_groups is a dictionary that contains all veos groups # veos_groups is used to generate the veos file diff --git a/ansible/testbed_add_vm_topology.yml b/ansible/testbed_add_vm_topology.yml index e99a71bf405..23ee028e9ad 100644 --- a/ansible/testbed_add_vm_topology.yml +++ b/ansible/testbed_add_vm_topology.yml @@ -71,10 +71,8 @@ conn_graph_facts: host={{ dut_name }} connection: local - - name: Extracting vlan_range - set_fact: vlan_base={{ device_vlan_list | min }} - roles: + - { role: vm_set, action: 'start_sonic_vm' } - { role: vm_set, action: 'add_topo' } - hosts: servers:&eos @@ -107,5 +105,3 @@ roles: - { role: eos, when: topology.VMs is defined and inventory_hostname in VM_targets } 
# role eos will be executed in any case, and when will evaluate with every task - - diff --git a/ansible/testbed_connect_vms.yml b/ansible/testbed_connect_vms.yml index e1a1955ca52..0bd464407d3 100644 --- a/ansible/testbed_connect_vms.yml +++ b/ansible/testbed_connect_vms.yml @@ -45,8 +45,5 @@ conn_graph_facts: host={{ dut_name }} connection: local - - name: Extracting vlan_range - set_fact: vlan_base={{ device_vlan_list | min }} - roles: - { role: vm_set, action: 'connect_vms' } diff --git a/ansible/testbed_disconnect_vms.yml b/ansible/testbed_disconnect_vms.yml index efaec9c6632..1b32bb17311 100644 --- a/ansible/testbed_disconnect_vms.yml +++ b/ansible/testbed_disconnect_vms.yml @@ -45,8 +45,5 @@ conn_graph_facts: host={{ dut_name }} connection: local - - name: Extracting vlan_range - set_fact: vlan_base={{ device_vlan_list | min }} - roles: - { role: vm_set, action: 'disconnect_vms' } diff --git a/ansible/testbed_refresh_dut.yml b/ansible/testbed_refresh_dut.yml new file mode 100644 index 00000000000..2ae764bfb22 --- /dev/null +++ b/ansible/testbed_refresh_dut.yml @@ -0,0 +1,77 @@ +# This playbook refreshes the DUT in a topology +# +# Topologies are defined inside of the vars/ directory in files vars/topo_{{ topology_name}}.yml +# This file contains three structures: +# - topology +# - configuration property +# - configuration +# +# topology key contains a dictionary of hostnames with 'vm_offset' and 'vlans' keys in it. +# 'vm_offset' is used to map current hostname vm_set VM to server VM (like ARISTA01T0 -> VM0300). +# This offset is used on VM_base +# 'vlans' is a list of vlan offsets which helps us to calculate vlan numbers which will be connected to Eth1/1..Eth1/8 interfaces.
+# These offsets are used with vlan_base +# +# Every topology should have a name to distinguish one topology from another on the server +# Every topology contains a ptf container which will be used as placeholder for the injected interfaces from VMs, or direct connections to PTF host +# +# To refresh the DUT in a topology please use the following command +# ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos testbed_refresh_dut.yml --vault-password-file=~/.password -l server_3 -e vm_set_name=first -e dut_name=str-msn2700-01 -e VM_base=VM0300 -e ptf_ip=10.255.0.255/23 -e topo=t0 -e ptf_imagename="docker_ptf" +# +# Parameters +# -l server_3 - this playbook has to be limited to run only on one server +# -e vm_set_name=first - the name of vm_set +# -e dut_name=str-msn2700-01 - the name of target dut +# -e VM_base=VM0300 - the VM name which is used as base to calculate VM names for this set +# -e ptf_ip=10.255.0.255/23 - the ip address and prefix of ptf container mgmt interface +# -e topo=t0 - the name of the topo to refresh +# -e ptf_imagename=docker-ptf - name of a docker-image which will be used for the ptf docker container + +- hosts: servers:&vm_host + gather_facts: no + vars_files: + - vars/docker_registry.yml + pre_tasks: + - name: Check for a single host + fail: msg="Please use -l server_X to limit this playbook to one host" + when: "{{ play_hosts|length }} != 1" + + - name: Check that variable vm_set_name is defined + fail: msg="Define vm_set_name variable with -e vm_set_name=something" + when: vm_set_name is not defined + + - name: Check that variable dut_name is defined + fail: msg="Define dut_name variable with -e dut_name=something" + when: dut_name is not defined + + - name: Check that variable VM_base is defined + fail: msg="Define VM_base variable with -e VM_base=something" + when: VM_base is not defined + + - name: Check that variable ptf_ip is defined + fail: msg="Define ptf ip variable with -e ptf_ip=something" + when: ptf_ip is not defined + + - name: Check that variable topo
is defined + fail: msg="Define topo variable with -e topo=something" + when: topo is not defined + + - name: Check if it is a known topology + fail: msg="Unknown topology {{ topo }}" + when: topo not in topologies + + - name: Check that variable ptf_imagename is defined + fail: msg="Define ptf_imagename variable with -e ptf_imagename=something" + when: ptf_imagename is not defined + + - name: Load topo variables + include_vars: "vars/topo_{{ topo }}.yml" + + - name: Read dut minigraph + conn_graph_facts: host={{ dut_name }} + connection: local + + roles: + - { role: vm_set, action: 'stop_sonic_vm' } + - { role: vm_set, action: 'start_sonic_vm' } + - { role: vm_set, action: 'add_topo' } diff --git a/ansible/testbed_remove_vm_topology.yml b/ansible/testbed_remove_vm_topology.yml index 7b68b163e68..a05108164ad 100644 --- a/ansible/testbed_remove_vm_topology.yml +++ b/ansible/testbed_remove_vm_topology.yml @@ -54,9 +54,7 @@ conn_graph_facts: host={{ dut_name }} connection: local - - name: Extracting vlan_range - set_fact: vlan_base={{ device_vlan_list | min }} - roles: - { role: vm_set, action: 'remove_topo' } + - { role: vm_set, action: 'stop_sonic_vm' } diff --git a/ansible/testbed_renumber_vm_topology.yml b/ansible/testbed_renumber_vm_topology.yml index 0269e512219..5fe11f05370 100644 --- a/ansible/testbed_renumber_vm_topology.yml +++ b/ansible/testbed_renumber_vm_topology.yml @@ -53,9 +53,6 @@ conn_graph_facts: host={{ dut_name }} connection: local - - name: Extracting vlan_range - set_fact: vlan_base={{ device_vlan_list | min }} - roles: - { role: vm_set, action: 'renumber_topo' } diff --git a/ansible/veos.vtb b/ansible/veos.vtb new file mode 100644 index 00000000000..4ea5a7af40e --- /dev/null +++ b/ansible/veos.vtb @@ -0,0 +1,32 @@ +[vm_host_1] +STR-ACS-VSERV-01 ansible_host=172.17.0.1 ansible_user=use_own_value + +[vm_host:children] +vm_host_1 + +[vms_1] +VM0100 ansible_host=10.250.0.51 +VM0101 ansible_host=10.250.0.52 +VM0102 ansible_host=10.250.0.53 +VM0103 
ansible_host=10.250.0.54 + + +[eos:children] +vms_1 + +## The groups below are helper to limit running playbooks to server_1, server_2 or server_3 only +[server_1:children] +vm_host_1 +vms_1 + +[server_1:vars] +host_var_file=host_vars/STR-ACS-VSERV-01.yml + +[servers:children] +server_1 + +[servers:vars] +topologies=['t1', 't1-lag', 't1-64-lag', 't0', 't0-16', 't0-56', 't0-52', 'ptf32', 'ptf64', 't0-64', 't0-64-32', 't0-116'] + +[sonic] +vlab-01 ansible_host=10.250.0.101 type=kvm diff --git a/ansible/vtestbed.csv b/ansible/vtestbed.csv new file mode 100644 index 00000000000..1335c3194c3 --- /dev/null +++ b/ansible/vtestbed.csv @@ -0,0 +1,2 @@ +# conf-name,group-name,topo,ptf_image_name,ptf_ip,server,vm_base,dut,comment +vms-kvm-t0,vms6-1,t0,docker-ptf-sai-brcm,10.250.0.102/24,server_1,VM0100,vlab-01,Tests virtual switch vm