diff --git a/ansible/library/bgp_facts.py b/ansible/library/bgp_facts.py
new file mode 100644
index 00000000000..13902dbce0e
--- /dev/null
+++ b/ansible/library/bgp_facts.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+
+DOCUMENTATION = '''
+module: bgp_facts
+version_added: "2.0"
+author: John Arnold (johnar@microsoft.com)
+short_description: Retrieve BGP neighbor information from Quagga
+description:
+ - Retrieve BGP neighbor information from Quagga, using the VTYSH command line
+ - Retrieved facts will be inserted into the 'bgp_neighbors' key
+'''
+
+EXAMPLES = '''
+- name: Get BGP neighbor information
+ bgp_facts:
+'''
+
+# Example of the source data
+'''
+BGP neighbor is 10.0.0.61, remote AS 64015, local AS 65100, external link
+ Description: ARISTA15T0
+ BGP version 4, remote router ID 0.0.0.0
+ BGP state = Active
+ Last read 6d13h16m, hold time is 180, keepalive interval is 60 seconds
+ Message statistics:
+ Inq depth is 0
+ Outq depth is 0
+ Sent Rcvd
+ Opens: 1 1
+ Notifications: 0 0
+ Updates: 6595 3
+ Keepalives: 949 948
+ Route Refresh: 0 0
+ Capability: 0 0
+ Total: 7545 952
+ Minimum time between advertisement runs is 30 seconds
+
+ For address family: IPv4 Unicast
+ Community attribute sent to this neighbor(both)
+ 0 accepted prefixes
+
+ Connections established 1; dropped 1
+ Last reset 6d13h15m, due to
+Next connect timer due in 31 seconds
+Read thread: off Write thread: off
+'''
+
+
+class BgpModule(object):
+ def __init__(self):
+ self.module = AnsibleModule(
+ argument_spec=dict(
+ ),
+ supports_check_mode=True)
+
+ self.out = None
+ self.facts = {}
+
+ return
+
+ def run(self):
+ """
+ Main method of the class
+ """
+ self.collect_neighbors()
+ self.parse_neighbors()
+ self.module.exit_json(ansible_facts=self.facts)
+
+
+ def collect_neighbors(self):
+ """
+ Collect bgp neighbors by reading output of 'vtysh' command line tool
+ """
+ try:
+ rc, self.out, err = self.module.run_command('vtysh -c "show ip bgp neighbors"',
+ executable='/bin/bash', use_unsafe_shell=True)
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ if rc != 0:
+ self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
+ (rc, self.out, err))
+
+ return
+
+ def parse_neighbors(self):
+
+ regex_ip = re.compile(r'^BGP neighbor is (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+ regex_remote_as = re.compile(r'.*remote AS (\d+)')
+ regex_local_as = re.compile(r'.*local AS (\d+)')
+ regex_desc = re.compile(r'.*Description: (.*)')
+ regex_stats = re.compile(r'.*(Opens|Notifications|Updates|Keepalives|Route Refresh|Capability|Total):.*')
+ regex_state = re.compile(r'.*BGP state = (\w+)')
+ regex_mrai = re.compile(r'.*Minimum time between advertisement runs is (\d{1,4})')
+ regex_accepted = re.compile(r'.*(\d+) accepted prefixes')
+ regex_conn_est = re.compile(r'.*Connections established (\d+)')
+ regex_conn_dropped = re.compile(r'.*Connections established \d+; dropped (\d+)')
+ regex_routerid = re.compile(r'.*remote router ID (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+
+ neighbors = {}
+
+ try:
+ split_output = self.out.split("BGP neighbor")
+
+ for n in split_output:
+
+ # ignore empty rows
+ if 'BGP' in n:
+ neighbor = {}
+ message_stats = {}
+ n = "BGP neighbor" + n
+ lines = n.splitlines()
+
+ for line in lines:
+ if regex_ip.match(line): neighbor_ip = regex_ip.match(line).group(1)
+ if regex_remote_as.match(line): neighbor['remote AS'] = int(regex_remote_as.match(line).group(1))
+ if regex_local_as.match(line): neighbor['local AS'] = int(regex_local_as.match(line).group(1))
+ if regex_desc.match(line): neighbor['description'] = regex_desc.match(line).group(1)
+ if regex_state.match(line): neighbor['state'] = regex_state.match(line).group(1).lower()
+ if regex_mrai.match(line): neighbor['mrai'] = int(regex_mrai.match(line).group(1))
+ if regex_accepted.match(line): neighbor['accepted prefixes'] = int(regex_accepted.match(line).group(1))
+ if regex_conn_est.match(line): neighbor['connections established'] = int(regex_conn_est.match(line).group(1))
+ if regex_conn_dropped.match(line): neighbor['connections dropped'] = int(regex_conn_dropped.match(line).group(1))
+ if regex_routerid.match(line): neighbor['remote routerid'] = regex_routerid.match(line).group(1)
+
+ if regex_stats.match(line):
+ key, values = line.split(':')
+ key = key.lstrip()
+ sent, rcvd = values.split()
+ value_dict = {}
+ value_dict['sent'] = int(sent)
+ value_dict['rcvd'] = int(rcvd)
+ message_stats[key] = value_dict
+
+ if message_stats:
+ neighbor['message statistics'] = message_stats
+
+ neighbors[neighbor_ip] = neighbor
+
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ self.facts['bgp_neighbors'] = neighbors
+
+ return
+
+
+def main():
+ bgp = BgpModule()
+ bgp.run()
+
+ return
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/ansible/library/format_bgp_facts.py b/ansible/library/format_bgp_facts.py
new file mode 100644
index 00000000000..9e4d8160932
--- /dev/null
+++ b/ansible/library/format_bgp_facts.py
@@ -0,0 +1,221 @@
+#!/usr/bin/python
+
+
+DOCUMENTATION = '''
+module: format_bgp_facts
+version_added: "2.0"
+author: John Arnold (johnar@microsoft.com)
+short_description: Format BGP neighbor info from different OSes
+description:
+ - Format BGP neighbor information from FTOS, Arista, Nexus etc.
+ - Retrieved facts will be inserted into the 'bgp_neighbors' key
+'''
+
+EXAMPLES = '''
+- name: Format BGP neighbor information
+  format_bgp_facts: bgp_neighbors_string="{{ result.stdout }}" hw_sku_class="Force10"
+'''
+
+# Example of the source data
+'''
+BGP neighbor is 10.0.0.61, remote AS 64015, local AS 65100, external link
+ Description: ARISTA15T0
+ BGP version 4, remote router ID 0.0.0.0
+ BGP state = Active
+ Last read 6d13h16m, hold time is 180, keepalive interval is 60 seconds
+ Message statistics:
+ Inq depth is 0
+ Outq depth is 0
+ Sent Rcvd
+ Opens: 1 1
+ Notifications: 0 0
+ Updates: 6595 3
+ Keepalives: 949 948
+ Route Refresh: 0 0
+ Capability: 0 0
+ Total: 7545 952
+ Minimum time between advertisement runs is 30 seconds
+
+ For address family: IPv4 Unicast
+ Community attribute sent to this neighbor(both)
+ 0 accepted prefixes
+
+ Connections established 1; dropped 1
+ Last reset 6d13h15m, due to
+Next connect timer due in 31 seconds
+Read thread: off Write thread: off
+'''
+
+
+class BgpModule(object):
+
+
+ def __init__(self):
+ self.module = AnsibleModule(
+ argument_spec=dict(
+ is_sonic=dict(required=False, default=False, type='bool'),
+ hw_sku_class=dict(required=True, choices=['Force10', 'Nexus']),
+ bgp_neighbors_string=dict(required=False),
+ ),
+ supports_check_mode=True)
+
+ self.is_sonic = self.module.params['is_sonic']
+ self.hw_sku_class = self.module.params['hw_sku_class']
+ if self.module.params['bgp_neighbors_string']: self.bgp_neighbors_string = self.module.params['bgp_neighbors_string']
+ self.facts = {}
+
+ return
+
+ def run(self):
+ """
+ Main method of the class
+ """
+
+ if self.bgp_neighbors_string:
+ if self.is_sonic:
+ self.parse_sonic_neighbors()
+ else:
+ self.parse_neighbors()
+
+ self.module.exit_json(ansible_facts=self.facts)
+
+ def parse_sonic_neighbors(self):
+
+ regex_ip = re.compile(r'^BGP neighbor is (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+ regex_remote_as = re.compile(r'.*remote AS (\d+)')
+ regex_local_as = re.compile(r'.*local AS (\d+)')
+ regex_desc = re.compile(r'.*Description: (.*)')
+ regex_stats = re.compile(r'.*(Opens|Notifications|Updates|Keepalives|Route Refresh|Capability|Total):.*')
+ regex_state = re.compile(r'.*BGP state = (\w+)')
+ regex_mrai = re.compile(r'.*Minimum time between advertisement runs is (\d{1,4})')
+ regex_accepted = re.compile(r'.*(\d+) accepted prefixes')
+ regex_conn_est = re.compile(r'.*Connections established (\d+)')
+ regex_conn_dropped = re.compile(r'.*Connections established \d+; dropped (\d+)')
+ regex_routerid = re.compile(r'.*remote router ID (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+
+ neighbors = {}
+
+ try:
+ split_output = self.bgp_neighbors_string.split("BGP neighbor")
+
+ for n in split_output:
+
+ # ignore empty rows
+ if 'BGP' in n:
+ neighbor = {}
+ message_stats = {}
+ n = "BGP neighbor" + n
+ lines = n.splitlines()
+
+ for line in lines:
+ if regex_ip.match(line): neighbor_ip = regex_ip.match(line).group(1)
+ if regex_remote_as.match(line): neighbor['remote AS'] = int(regex_remote_as.match(line).group(1))
+ if regex_local_as.match(line): neighbor['local AS'] = int(regex_local_as.match(line).group(1))
+ if regex_desc.match(line): neighbor['description'] = regex_desc.match(line).group(1)
+ if regex_state.match(line): neighbor['state'] = regex_state.match(line).group(1).lower()
+ if regex_mrai.match(line): neighbor['mrai'] = int(regex_mrai.match(line).group(1))
+ if regex_accepted.match(line): neighbor['accepted prefixes'] = int(regex_accepted.match(line).group(1))
+ if regex_conn_est.match(line): neighbor['connections established'] = int(regex_conn_est.match(line).group(1))
+ if regex_conn_dropped.match(line): neighbor['connections dropped'] = int(regex_conn_dropped.match(line).group(1))
+ if regex_routerid.match(line): neighbor['remote routerid'] = regex_routerid.match(line).group(1)
+
+ if regex_stats.match(line):
+ key, values = line.split(':')
+ key = key.lstrip()
+ sent, rcvd = values.split()
+ value_dict = {}
+ value_dict['sent'] = int(sent)
+ value_dict['rcvd'] = int(rcvd)
+ message_stats[key] = value_dict
+
+ if message_stats:
+ neighbor['message statistics'] = message_stats
+
+ neighbors[neighbor_ip] = neighbor
+
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ self.facts['bgp_neighbors'] = neighbors
+
+ return
+
+ def parse_neighbors(self):
+
+
+ regex_ip = re.compile(r'^BGP neighbor is (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+ regex_remote_as = re.compile(r'.*remote AS (\d+)')
+ regex_local_as = re.compile(r'.*local AS (\d+)')
+ if self.hw_sku_class == "Force10":
+ regex_desc = re.compile(r'.*Description : (.*)')
+ regex_stats = re.compile(r'.*(Opens|Notifications|Updates|Keepalives|Route Refresh|Capability|Total):.*')
+ regex_state = re.compile(r'.*BGP state (\w+)')
+ elif self.hw_sku_class == "Nexus":
+ regex_desc = re.compile(r'.*Description\s*:\s*(.*)')
+ regex_stats = re.compile(r'.*(Opens|Notifications|Updates|Keepalives|Route Refresh|Capability|Total):\s+(\d+)\s+(\d+)')
+ regex_state = re.compile(r'.*BGP state =?\s*(\w+)')
+ regex_mrai = re.compile(r'.*Minimum time between advertisement runs is (\d{1,4})')
+ regex_accepted = re.compile(r'.*Prefixes accepted (\d+)')
+ regex_conn_est = re.compile(r'.*Connections established (\d+)')
+ regex_conn_dropped = re.compile(r'.*Connections established \d+; dropped (\d+)')
+ regex_routerid = re.compile(r'.*remote router ID (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')
+
+ neighbors = {}
+
+ try:
+ split_output = self.bgp_neighbors_string.split("BGP neighbor")
+
+ for n in split_output:
+
+ # ignore empty rows
+ if 'BGP' in n:
+ neighbor = {}
+ message_stats = {}
+ n = "BGP neighbor" + n
+ lines = n.splitlines()
+
+ for line in lines:
+ if regex_ip.match(line): neighbor_ip = regex_ip.match(line).group(1)
+ if regex_remote_as.match(line): neighbor['remote AS'] = int(regex_remote_as.match(line).group(1))
+ if regex_local_as.match(line): neighbor['local AS'] = int(regex_local_as.match(line).group(1))
+ if regex_desc.match(line): neighbor['description'] = regex_desc.match(line).group(1)
+ if regex_state.match(line): neighbor['state'] = regex_state.match(line).group(1).lower()
+ if regex_mrai.match(line): neighbor['mrai'] = int(regex_mrai.match(line).group(1))
+ if regex_accepted.match(line): neighbor['accepted prefixes'] = int(regex_accepted.match(line).group(1))
+ if regex_conn_est.match(line): neighbor['connections established'] = int(regex_conn_est.match(line).group(1))
+ if regex_conn_dropped.match(line): neighbor['connections dropped'] = int(regex_conn_dropped.match(line).group(1))
+ if regex_routerid.match(line): neighbor['remote routerid'] = regex_routerid.match(line).group(1)
+
+ if regex_stats.match(line):
+ key, values = line.split(':')
+ key = key.lstrip()
+ sent, rcvd = values.split()
+ value_dict = {}
+ value_dict['sent'] = int(sent)
+ value_dict['rcvd'] = int(rcvd)
+ message_stats[key] = value_dict
+
+ if message_stats:
+ neighbor['message statistics'] = message_stats
+
+ neighbors[neighbor_ip] = neighbor
+
+ except Exception as e:
+ self.module.fail_json(msg=str(e))
+
+ self.facts['bgp_neighbors'] = neighbors
+
+ return
+
+
+
+def main():
+ bgp = BgpModule()
+ bgp.run()
+
+ return
+
+
+from ansible.module_utils.basic import *
+if __name__ == "__main__":
+ main()
diff --git a/ansible/library/interface_facts.py b/ansible/library/interface_facts.py
new file mode 100644
index 00000000000..fb0b465dce5
--- /dev/null
+++ b/ansible/library/interface_facts.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+import os
+import sys
+import stat
+import array
+import errno
+import fcntl
+import fnmatch
+import glob
+import platform
+import re
+import signal
+import socket
+import struct
+import datetime
+import getpass
+import pwd
+import ConfigParser
+import StringIO
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+
+def get_default_interfaces(ip_path, module):
+ # Use the commands:
+ # ip -4 route get 8.8.8.8 -> Google public DNS
+ # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
+ # to find out the default outgoing interface, address, and gateway
+ command = dict(
+ v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
+ v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
+ )
+ interface = dict(v4 = {}, v6 = {})
+ for key in command.keys():
+ """
+ if key == 'v6' and self.facts['os_family'] == 'RedHat' \
+ and self.facts['distribution_version'].startswith('4.'):
+ continue
+ """
+ if key == 'v6' and not socket.has_ipv6:
+ continue
+ rc, out, err = module.run_command(command[key])
+ if not out:
+ # v6 routing may result in
+ # RTNETLINK answers: Invalid argument
+ continue
+ words = out.split('\n')[0].split()
+ # A valid output starts with the queried address on the first line
+ if len(words) > 0 and words[0] == command[key][-1]:
+ for i in range(len(words) - 1):
+ if words[i] == 'dev':
+ interface[key]['interface'] = words[i+1]
+ elif words[i] == 'src':
+ interface[key]['address'] = words[i+1]
+ elif words[i] == 'via' and words[i+1] != command[key][-1]:
+ interface[key]['gateway'] = words[i+1]
+ return interface['v4'], interface['v6']
+
+def get_file_content(path, default=None, strip=True):
+ data = default
+ if os.path.exists(path) and os.access(path, os.R_OK):
+ try:
+ datafile = open(path)
+ data = datafile.read()
+ if strip:
+ data = data.strip()
+ if len(data) == 0:
+ data = default
+ finally:
+ datafile.close()
+ return data
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ ip_path=dict(required=False, default="/sbin/ip"),),
+ supports_check_mode=False)
+
+ """
+ f = Network(module)
+ #facts = linux_network.populate()
+ results = Tree()
+
+ # results['ansible_interfaces_facts'] = facts
+ module.exit_json(ansible_facts=results)
+
+ """
+ m_args = module.params
+ ip_path = m_args['ip_path']
+ default_ipv4, default_ipv6 = get_default_interfaces(ip_path, module)
+ interfaces = dict()
+ ips = dict(
+ all_ipv4_addresses = [],
+ all_ipv6_addresses = [],
+ )
+
+ #paths = ['/sys/class/net/Ethernet4', '/sys/class/net/lo', '/sys/class/net/eth0']
+ for path in glob.glob('/sys/class/net/*'):
+ #for path in paths:
+ if not os.path.isdir(path):
+ continue
+ device = os.path.basename(path)
+ interfaces[device] = { 'device': device }
+ if os.path.exists(os.path.join(path, 'address')):
+ macaddress = get_file_content(os.path.join(path, 'address'), default='')
+ if macaddress and macaddress != '00:00:00:00:00:00':
+ interfaces[device]['macaddress'] = macaddress
+ if os.path.exists(os.path.join(path, 'mtu')):
+ val = get_file_content(os.path.join(path, 'mtu'))
+ if val != None and True == val.isdigit():
+ interfaces[device]['mtu'] = int(val)
+ if os.path.exists(os.path.join(path, 'operstate')):
+ interfaces[device]['active'] = get_file_content(os.path.join(path, 'operstate')) != 'down'
+
+ if os.path.exists(os.path.join(path, 'carrier')):
+ try:
+ interfaces[device]['link'] = ( get_file_content(os.path.join(path, 'carrier')) == '1')
+ except:
+ pass
+ if os.path.exists(os.path.join(path, 'device','driver', 'module')):
+ interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
+ if os.path.exists(os.path.join(path, 'type')):
+ protocol_type = get_file_content(os.path.join(path, 'type'))
+ if protocol_type == '1':
+ interfaces[device]['type'] = 'ether'
+ elif protocol_type == '512':
+ interfaces[device]['type'] = 'ppp'
+ elif protocol_type == '772':
+ interfaces[device]['type'] = 'loopback'
+ if os.path.exists(os.path.join(path, 'bridge')):
+ interfaces[device]['type'] = 'bridge'
+ interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
+ if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
+ interfaces[device]['id'] = get_file_content(os.path.join(path, 'bridge', 'bridge_id'), default='')
+ if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
+ interfaces[device]['stp'] = get_file_content(os.path.join(path, 'bridge', 'stp_state')) == '1'
+ if os.path.exists(os.path.join(path, 'bonding')):
+ interfaces[device]['type'] = 'bonding'
+ interfaces[device]['slaves'] = get_file_content(os.path.join(path, 'bonding', 'slaves'), default='').split()
+ interfaces[device]['mode'] = get_file_content(os.path.join(path, 'bonding', 'mode'), default='').split()[0]
+ interfaces[device]['miimon'] = get_file_content(os.path.join(path, 'bonding', 'miimon'), default='').split()[0]
+ interfaces[device]['lacp_rate'] = get_file_content(os.path.join(path, 'bonding', 'lacp_rate'), default='').split()[0]
+ primary = get_file_content(os.path.join(path, 'bonding', 'primary'))
+ if primary:
+ interfaces[device]['primary'] = primary
+ path = os.path.join(path, 'bonding', 'all_slaves_active')
+ if os.path.exists(path):
+ interfaces[device]['all_slaves_active'] = get_file_content(path) == '1'
+ if os.path.exists(os.path.join(path,'device')):
+ interfaces[device]['pciid'] = os.path.basename(os.readlink(os.path.join(path,'device')))
+
+ # Check whether an interface is in promiscuous mode
+ if os.path.exists(os.path.join(path,'flags')):
+ promisc_mode = False
+            # Bit 0x0100 (IFF_PROMISC) of the flags value indicates promiscuous mode.
+ # 1 = promisc
+ # 0 = no promisc
+ data = int(get_file_content(os.path.join(path, 'flags')),16)
+ promisc_mode = (data & 0x0100 > 0)
+ interfaces[device]['promisc'] = promisc_mode
+
+ def parse_ip_output(module, output, secondary=False):
+ for line in output.split('\n'):
+ if not line:
+ continue
+ words = line.split()
+ broadcast = ''
+ if words[0] == 'inet':
+ if len(words) < 2:
+ continue
+ if '/' in words[1]:
+ address, netmask_length = words[1].split('/')
+ if len(words) > 3:
+ broadcast = words[3]
+ else:
+ # pointopoint interfaces do not have a prefix
+ address = words[1]
+ netmask_length = "32"
+ address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
+ netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
+ netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
+ network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
+ iface = words[-1]
+ if iface != device:
+ interfaces[iface] = {}
+ if False == secondary:
+ if "ipv4" not in interfaces[iface]:
+ interfaces[iface]['ipv4'] = {'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network}
+ else:
+ if "ipv4_secondaries" not in interfaces[iface]:
+ interfaces[iface]["ipv4_secondaries"] = []
+
+ interfaces[iface]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ })
+
+ # add this secondary IP to the main device
+ if secondary:
+ if "ipv4_secondaries" not in interfaces[device]:
+ interfaces[device]["ipv4_secondaries"] = []
+ interfaces[device]["ipv4_secondaries"].append({
+ 'address': address,
+ 'broadcast': broadcast,
+ 'netmask': netmask,
+ 'network': network,
+ })
+
+ # If this is the default address, update default_ipv4
+ if 'address' in default_ipv4 and default_ipv4['address'] == address:
+ default_ipv4['broadcast'] = broadcast
+ default_ipv4['netmask'] = netmask
+ default_ipv4['network'] = network
+ default_ipv4['macaddress'] = macaddress
+ default_ipv4['mtu'] = interfaces[device]['mtu']
+ default_ipv4['type'] = interfaces[device].get("type", "unknown")
+ default_ipv4['alias'] = words[-1]
+ if not address.startswith('127.'):
+ ips['all_ipv4_addresses'].append(address)
+ elif words[0] == 'inet6':
+ address, prefix = words[1].split('/')
+ scope = words[3]
+ if 'ipv6' not in interfaces[device]:
+ interfaces[device]['ipv6'] = []
+ interfaces[device]['ipv6'].append({
+ 'address' : address,
+ 'prefix' : prefix,
+ 'scope' : scope
+ })
+ # If this is the default address, update default_ipv6
+ if 'address' in default_ipv6 and default_ipv6['address'] == address:
+ default_ipv6['prefix'] = prefix
+ default_ipv6['scope'] = scope
+ default_ipv6['macaddress'] = macaddress
+ default_ipv6['mtu'] = interfaces[device]['mtu']
+ default_ipv6['type'] = interfaces[device].get("type", "unknown")
+ if not address == '::1':
+ ips['all_ipv6_addresses'].append(address)
+
+ ip_path = module.get_bin_path("ip")
+
+ args = [ip_path, 'addr', 'show', 'primary', device]
+ rc, stdout, stderr = module.run_command(args)
+ primary_data = stdout
+
+ args = [ip_path, 'addr', 'show', 'secondary', device]
+ rc, stdout, stderr = module.run_command(args)
+ secondary_data = stdout
+
+ parse_ip_output(module, primary_data)
+ parse_ip_output(module, secondary_data, secondary=True)
+
+ results = {}
+
+ results['ansible_interface_facts'] = interfaces
+ results['ansible_interface_ips'] = ips
+ module.exit_json(ansible_facts=results)
+
+main()
+
diff --git a/ansible/library/interface_up_down_data_struct_facts.py b/ansible/library/interface_up_down_data_struct_facts.py
new file mode 100644
index 00000000000..843ab9fc1ad
--- /dev/null
+++ b/ansible/library/interface_up_down_data_struct_facts.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ data=dict(required=False, type='dict', default=None),),
+ supports_check_mode=False)
+
+ m_args = module.params
+ results = {}
+ data = m_args['data']
+
+ data_struct = dict()
+ i = 1
+ device_type = ""
+ for key in data:
+ if key == 'eth0':
+ continue
+ host_ip = data[key]['chassis']['mgmt-ip']
+
+ if host_ip not in data_struct:
+ data_struct = {host_ip:{}}
+ data_struct[host_ip]['nei_interfaces'] = {}
+ data_struct[host_ip]['nei_device_type'] = {}
+ if 'Arista' in data[key]['chassis']['descr']:
+ device_type = 'Arista'
+ elif 'Nexus' in data[key]['chassis']['descr']:
+ device_type = 'Nexus'
+ interface = data[key]['port']['ifname']
+ data_struct[host_ip]['nei_interfaces'][str(i)] = interface
+ data_struct[host_ip]['nei_device_type'] = device_type
+ i = i + 1
+
+ results['ansible_interface_up_down_data_struct_facts'] = data_struct
+ module.exit_json(ansible_facts=results)
+
+main()
diff --git a/ansible/library/lldp_facts.py b/ansible/library/lldp_facts.py
new file mode 100644
index 00000000000..0dc2eb88dc5
--- /dev/null
+++ b/ansible/library/lldp_facts.py
@@ -0,0 +1,266 @@
+#!/usr/bin/env python
+
+DOCUMENTATION = '''
+---
+module: lldp_facts
+version_added: "1.9"
+author: "Samir Jamkhande (samirja@microsoft.com)"
+short_description: Retrieve LLDP facts for a device using SNMP.
+description:
+ - Retrieve LLDP facts for a device using SNMP, the facts will be
+ inserted to the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+ - Set to target snmp server (normally {{inventory_hostname}})
+        required: true
+ version:
+ description:
+ - SNMP Version to use, v2/v2c or v3
+ choices: [ 'v2', 'v2c', 'v3' ]
+ required: true
+ community:
+ description:
+ - The SNMP community string, required if version is v2/v2c
+ required: false
+ level:
+ description:
+ - Authentication level, required if version is v3
+ choices: [ 'authPriv', 'authNoPriv' ]
+ required: false
+ username:
+ description:
+ - Username for SNMPv3, required if version is v3
+ required: false
+ integrity:
+ description:
+      - Hashing algorithm, required if version is v3
+ choices: [ 'md5', 'sha' ]
+ required: false
+ authkey:
+ description:
+ - Authentication key, required if version is v3
+ required: false
+ privacy:
+ description:
+      - Encryption algorithm, required if level is authPriv
+ choices: [ 'des', 'aes' ]
+ required: false
+ privkey:
+ description:
+      - Encryption key, required if level is authPriv
+ required: false
+'''
+
+EXAMPLES = '''
+# Gather LLDP facts with SNMP version 2
+- lldp_facts: host={{ inventory_hostname }} version=v2c community=public
+ connection: local
+
+# Gather LLDP facts using SNMP version 3
+- lldp_facts:
+ host={{ inventory_hostname }}
+ version=v3
+ level=authPriv
+ integrity=sha
+ privacy=aes
+ username=snmp-user
+ authkey=abc12345
+ privkey=def6789
+ delegate_to: localhost
+'''
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+
+try:
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ has_pysnmp = True
+except:
+ has_pysnmp = False
+
+class DefineOid(object):
+
+ def __init__(self,dotprefix=False):
+ if dotprefix:
+ dp = "."
+ else:
+ dp = ""
+
+    # From LLDP-MIB (lldpLocPortDesc, local port description)
+ self.if_descr = dp + "1.0.8802.1.1.2.1.3.7.1.3"
+
+ # From LLDP-MIB
+ self.lldp_rem_port_id = dp + "1.0.8802.1.1.2.1.4.1.1.7"
+ self.lldp_rem_sys_desc = dp + "1.0.8802.1.1.2.1.4.1.1.10"
+ self.lldp_rem_sys_name = dp + "1.0.8802.1.1.2.1.4.1.1.9"
+ self.lldp_rem_chassis_id = dp + "1.0.8802.1.1.2.1.4.1.1.5"
+
+def get_iftable(snmp_data):
+ """ Gets the interface table (if_index and interface) for a given device
+ for further snmp lookups
+
+ Args:
+ snmp_data - snmp data returned by cmdgen.nextCmd() for mib = .1.3.6.1.2.1.2.2.1.2
+
+ Returns:
+ if_table - dict formatted as if:if_index
+        inverse_if_table - dict formatted as if_index:if
+
+ Sample Output:
+        inverse_if_table = {u'719': u'Ethernet4/29/3', u'718':u'Ethernet4/29/2'}
+ if_table = {u'Ethernet4/29/3':u'719', u'Ethernet4/29/2': u'718'}
+ """
+ if_table = dict()
+ inverse_if_table = dict()
+
+ # Populate the if_table dict with parsed output
+ for if_tuple in snmp_data:
+ if_table[str(if_tuple[0][1])] = str(if_tuple[0][0]).split(".")[-1]
+ inverse_if_table[str(if_tuple[0][0]).split(".")[-1]] = str(if_tuple[0][1])
+
+ return (if_table, inverse_if_table)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ host=dict(required=True),
+ version=dict(required=True, choices=['v2', 'v2c', 'v3']),
+ community=dict(required=False, default=False),
+ username=dict(required=False),
+ level=dict(required=False, choices=['authNoPriv', 'authPriv']),
+ integrity=dict(required=False, choices=['md5', 'sha']),
+ privacy=dict(required=False, choices=['des', 'aes']),
+ authkey=dict(required=False),
+ privkey=dict(required=False),
+ removeplaceholder=dict(required=False)),
+ required_together = ( ['username','level','integrity','authkey'],['privacy','privkey'],),
+ supports_check_mode=False)
+
+ m_args = module.params
+
+ if not has_pysnmp:
+ module.fail_json(msg='Missing required pysnmp module (check docs)')
+
+ cmd_gen = cmdgen.CommandGenerator()
+
+ # Verify that we receive a community when using snmp v2
+ if m_args['version'] == "v2" or m_args['version'] == "v2c":
+ if not m_args['community']:
+ module.fail_json(msg='Community not set when using snmp version 2')
+
+ if m_args['version'] == "v3":
+ if m_args['username'] is None:
+ module.fail_json(msg='Username not set when using snmp version 3')
+
+ if m_args['level'] == "authPriv" and m_args['privacy'] == None:
+ module.fail_json(msg='Privacy algorithm not set when using authPriv')
+
+ if m_args['integrity'] == "sha":
+ integrity_proto = cmdgen.usmHMACSHAAuthProtocol
+ elif m_args['integrity'] == "md5":
+ integrity_proto = cmdgen.usmHMACMD5AuthProtocol
+
+ if m_args['privacy'] == "aes":
+ privacy_proto = cmdgen.usmAesCfb128Protocol
+ elif m_args['privacy'] == "des":
+ privacy_proto = cmdgen.usmDESPrivProtocol
+
+ # Use SNMP Version 2
+ if m_args['version'] == "v2" or m_args['version'] == "v2c":
+ snmp_auth = cmdgen.CommunityData(m_args['community'])
+
+ # Use SNMP Version 3 with authNoPriv
+ elif m_args['level'] == "authNoPriv":
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)
+
+ # Use SNMP Version 3 with authPriv
+ else:
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
+
+ # Use p to prefix OIDs with a dot for polling
+ p = DefineOid(dotprefix=True)
+ # Use v without a prefix to use with return values
+ v = DefineOid(dotprefix=False)
+
+ Tree = lambda: defaultdict(Tree)
+
+ results = Tree()
+
+ host = m_args['host']
+
+ error_indication, error_status, error_index, var_binds = cmd_gen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((host, 161)),
+ cmdgen.MibVariable(p.if_descr,)
+ )
+
+ if error_indication:
+ module.fail_json(msg=str(error_indication))
+
+ (if_table, inverse_if_table) = get_iftable(var_binds)
+
+ error_indication, error_status, error_index, var_table = cmd_gen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((host, 161)),
+ cmdgen.MibVariable(p.lldp_rem_port_id,),
+ cmdgen.MibVariable(p.lldp_rem_sys_desc,),
+ cmdgen.MibVariable(p.lldp_rem_sys_name,),
+ cmdgen.MibVariable(p.lldp_rem_chassis_id,),
+ )
+
+ if error_indication:
+ module.fail_json(msg=str(error_indication))
+
+ lldp_rem_sys = dict()
+ lldp_rem_port_id = dict()
+ lldp_rem_chassis_id = dict()
+ lldp_rem_sys_desc = dict()
+
+ vbd = []
+
+ for var_binds in var_table:
+ for oid, val in var_binds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ vbd.append(current_oid)
+ vbd.append(current_val)
+
+ try:
+ if_name = inverse_if_table[str(current_oid.split(".")[-2])]
+ except Exception as e:
+ print json.dumps({
+ "unbound_interface_index": str(current_oid.split(".")[-2])
+ })
+ module.fail_json(msg="unboundinterface in inverse if table")
+
+ if v.lldp_rem_sys_name in current_oid:
+ lldp_rem_sys[if_name] = current_val
+ continue
+ if v.lldp_rem_port_id in current_oid:
+ lldp_rem_port_id[if_name] = current_val
+ continue
+ if v.lldp_rem_chassis_id in current_oid:
+ lldp_rem_chassis_id[if_name] = current_val
+ continue
+ if v.lldp_rem_sys_desc in current_oid:
+ lldp_rem_sys_desc[if_name] = current_val
+ continue
+
+ lldp_data = dict()
+
+ for intf in lldp_rem_sys.viewkeys():
+ lldp_data[intf] = {'neighbor_sys_name': lldp_rem_sys[intf],
+ 'neighbor_port_id': lldp_rem_port_id[intf],
+ 'neighbor_sys_desc': lldp_rem_sys_desc[intf],
+ 'neighbor_chassis_id': lldp_rem_chassis_id[intf]}
+
+
+ results['ansible_lldp_facts'] = lldp_data
+ module.exit_json(ansible_facts=results)
+
+main()
+
diff --git a/ansible/library/sensors_facts.py b/ansible/library/sensors_facts.py
new file mode 100644
index 00000000000..ddb844ef3bc
--- /dev/null
+++ b/ansible/library/sensors_facts.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+
+import subprocess
+from ansible.module_utils.basic import *
+
+DOCUMENTATION = '''
+---
+module: sensors_facts
+version_added: "0.2"
+author: Pavel Shirshov (pavelsh@microsoft.com)
+short_description: Retrieve sensors facts for a device. Set alarm if there is hardware alarm
+description:
+ - Checks are defined in ansible variables. Argument for the module is 'checks' with dictionary with parameters.
+ - Retrieved facts will be inserted to the 'sensors' key.
+ - Retrieved raw values will be inserted to the 'raw' key.
+ - Recognized alarms will be inserted to the 'alarms' key.
+ - 'alarm' key will be set to True if the device has any alarm situation.
+ - If there's only one PSU on the device, 'warning' is set to True and 'warnings' have a message about it.
+
+'''
+
+EXAMPLES = '''
+# Gather sensors facts
+ - name: Gather sensors
+ sensors_facts: checks={{ sensors['Force10-S6000'] }}
+ - name: Output of sensors information
+ debug: var=vars['sensors']
+
+'''
+
+# Example of the source data
+'''
+acpitz-virtual-0
+temp1:
+ temp1_input: 26.800
+ temp1_crit: 127.000
+temp2:
+ temp2_input: 26.800
+ temp2_crit: 118.000
+'''
+
class SensorsModule(object):
    """Ansible module that parses 'sensors -A -u' output and raises alarms.

    The 'checks' module argument is a dict with:
      - 'psu_skips':  device -> {'skip_list': [...], 'number': N, 'side': S}
      - 'alarms':     hw_part -> list of raw-value paths that must be '0.000'
      - 'compares':   hw_part -> list of (input_path, max_path) pairs
      - 'non_zero':   hw_part -> list of paths that must not be '0.000'
    Facts are returned under the 'sensors' key with 'raw', 'alarms',
    'warnings', 'alarm' and 'warning' sub-keys.
    """

    def __init__(self):
        self.module = AnsibleModule(
            argument_spec=dict(
                checks=dict(required=True, type='dict'),
            )
        )

        self.checks = self.module.params['checks']

        self.stdout = None          # raw text captured from 'sensors'
        self.skip_devices = set()   # devices excluded because a PSU is absent
        self.raw = {}               # device -> subsystem -> sensor -> value
        self.alarms = {}            # per-hw-part alarm flags and reason lists
        self.warnings = []
        self.facts = {
            'raw': self.raw,
            'alarms': self.alarms,
            'warnings': self.warnings,
            'alarm': False,
            'warning': False,
        }

    def run(self):
        """Main method of the class: collect, parse, check, exit."""
        self.collect_sensors()
        self.parse_sensors()
        self.psu_check()
        self.check_alarms()
        self.module.exit_json(ansible_facts={'sensors': self.facts})

    def collect_sensors(self):
        """Collect sensors by reading output of the 'sensors' utility."""
        try:
            # BUG fix: stderr must be piped too - previously communicate()
            # returned None for stderr, so fail_json(msg=stderr) on a
            # non-zero exit always reported 'None' instead of the error.
            process = subprocess.Popen(['sensors', '-A', '-u'],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            self.stdout, stderr = process.communicate()
            ret_code = process.returncode
        except Exception as e:
            self.module.fail_json(msg=str(e))
        else:
            if ret_code != 0:
                self.module.fail_json(msg=stderr)

    def parse_sensors(self):
        """Parse 'sensors' utility output into the dictionary self.raw."""

        # Return true if the row is an empty line
        is_empty = lambda row: row == ''

        # Row naming a device ('acpitz-virtual-0' in the example above)
        is_device = lambda row: row[0] != ' ' and row[-1] != ':' and ':' not in row

        # Row naming a subsystem of the device ('temp1:' in the example above)
        is_subsystem = lambda row: row[0] != ' ' and row[-1] == ':'

        # Row with a sensor value ('temp1_input: 26.800' in the example above)
        is_sensor = lambda row: row[0] == ' ' and row[-1] != ':' and ':' in row

        device = None
        subsystem = None
        for row in self.stdout.splitlines():
            if is_empty(row):
                continue
            elif is_device(row):
                device = {}
                self.raw[row] = device
            elif is_subsystem(row):
                subsystem = {}
                device[row[:-1]] = subsystem
            elif is_sensor(row):
                # Split on the first ':' only so values containing a colon
                # don't raise "too many values to unpack".
                key, value = row.split(':', 1)
                subsystem[key.strip()] = value.strip()

    def psu_check(self):
        """Check that both PSUs are present on the remote system.

        If one is absent, fill self.skip_devices with the devices that
        should be skipped during checks and record a warning.
        """
        for dev, attrs in self.checks['psu_skips'].items():
            if dev not in self.raw:
                for idev in attrs['skip_list']:
                    self.skip_devices.add(idev)
                self.facts['warning'] = True
                self.warnings.append("PSU #%s [%s] is absent" % (attrs['number'], attrs['side']))

    def get_raw_value(self, path):
        """Return the raw value at 'path' ('dev/subsystem/sensor'), or None."""
        cur_value = self.raw
        for key in path.split('/'):
            if key in cur_value:
                cur_value = cur_value[key]
            else:
                return None

        return cur_value

    def _record_alarm(self, hw_part, reason):
        """Flag hw_part as alarmed and remember why.

        Creates the reasons list on demand - previously parts that appeared
        only in 'compares' or 'non_zero' had no list and raised KeyError.
        """
        self.alarms[hw_part] = True
        self.alarms.setdefault('%s_reasons' % hw_part, []).append(reason)
        self.facts['alarm'] = True

    def check_alarms(self):
        """Calculate alarm situation using the configured check lists."""

        # Return True if the value should be skipped (its device lacks a PSU)
        skip_the_value = lambda path: path.split('/')[0] in self.skip_devices

        # Alarm lists: value must exist and be exactly '0.000'
        for hw_part, alarm_list in self.checks['alarms'].items():
            self.alarms[hw_part] = False
            self.alarms['%s_reasons' % hw_part] = []
            for path in alarm_list:
                if skip_the_value(path):
                    continue
                value = self.get_raw_value(path)
                if value is None:
                    self._record_alarm(hw_part, 'Path %s does not exist' % path)
                elif value != '0.000':
                    self._record_alarm(hw_part, 'Alarm on %s' % path)

        # Compare lists: input value must stay below its maximum
        for hw_part, compare_list in self.checks['compares'].items():
            for (path_input, path_max) in compare_list:
                if skip_the_value(path_input):
                    continue
                value_input = self.get_raw_value(path_input)
                value_max = self.get_raw_value(path_max)
                if value_input is None:
                    self._record_alarm(hw_part, 'Path %s does not exist' % path_input)
                elif value_max is None:
                    self._record_alarm(hw_part, 'Path %s does not exist' % path_max)
                elif float(value_input) >= float(value_max):
                    self._record_alarm(hw_part, 'Alarm on %s' % path_input)

        # Non-zero lists: value must exist and differ from '0.000'
        for hw_part, not_zero_list in self.checks['non_zero'].items():
            for path in not_zero_list:
                if skip_the_value(path):
                    continue
                value = self.get_raw_value(path)
                if value is None:
                    self._record_alarm(hw_part, 'Path %s does not exist' % path)
                elif value == '0.000':
                    self._record_alarm(hw_part, 'Alarm on %s' % path)
+
def main():
    """Module entry point: build a SensorsModule and execute it."""
    SensorsModule().run()
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py
new file mode 100644
index 00000000000..2956e3f4de5
--- /dev/null
+++ b/ansible/library/snmp_facts.py
@@ -0,0 +1,466 @@
+#!/usr/bin/python
+
+# This file is part of Networklore's snmp library for Ansible
+#
+# The module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# The module is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: snmp_facts
+author: Patrick Ogenstad (@networklore)
+notes:
+ - Version 0.7
+short_description: Retrieve facts for a device using SNMP.
+description:
+ - Retrieve facts for a device using SNMP, the facts will be
+ inserted to the ansible_facts key.
+requirements:
+ - pysnmp
+options:
+ host:
+ description:
+ - Set to {{ inventory_hostname }}}
+ required: true
+ version:
+ description:
+ - SNMP Version to use, v2/v2c or v3
+ choices: [ 'v2', 'v2c', 'v3' ]
+ required: true
+ community:
+ description:
+ - The SNMP community string, required if version is v2/v2c
+ required: false
+ is_dell:
+ description:
+            - Whether the box is a Dell device or not
+ required: false
+ level:
+ description:
+ - Authentication level, required if version is v3
+ choices: [ 'authPriv', 'authNoPriv' ]
+ required: false
+ username:
+ description:
+ - Username for SNMPv3, required if version is v3
+ required: false
+ integrity:
+ description:
+            - Hashing algorithm, required if version is v3
+ choices: [ 'md5', 'sha' ]
+ required: false
+ authkey:
+ description:
+ - Authentication key, required if version is v3
+ required: false
+ privacy:
+ description:
+            - Encryption algorithm, required if level is authPriv
+ choices: [ 'des', 'aes' ]
+ required: false
+ privkey:
+ description:
+ - Encryption key, required if version is authPriv
+ required: false
+'''
+
+EXAMPLES = '''
+# Gather facts with SNMP version 2
+- snmp_facts: host={{ inventory_hostname }} version=2c community=public
+
+# Gather facts using SNMP version 3
+- snmp_facts:
+ host={{ inventory_hostname }}
+ version=v3
+ level=authPriv
+ integrity=sha
+ privacy=aes
+ username=snmp-user
+ authkey=abc12345
+ privkey=def6789
+'''
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+
+try:
+ from pysnmp.proto import rfc1902
+ from pysnmp.entity.rfc3413.oneliner import cmdgen
+ from pyasn1.type import univ
+ has_pysnmp = True
+except:
+ has_pysnmp = False
+
class DefineOid(object):
    """Catalog of the numeric OIDs this module polls.

    With dotprefix=True every OID carries a leading dot (the form pysnmp
    wants for polling); without it, the dotless form used when matching
    OIDs returned by the agent.
    """

    def __init__(self, dotprefix=False):
        dp = "." if dotprefix else ""

        # SNMPv2-MIB system group (scalars)
        self.sysDescr = dp + "1.3.6.1.2.1.1.1.0"
        self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0"
        self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0"
        self.sysContact = dp + "1.3.6.1.2.1.1.4.0"
        self.sysName = dp + "1.3.6.1.2.1.1.5.0"
        self.sysLocation = dp + "1.3.6.1.2.1.1.6.0"

        # IF-MIB interface table columns
        self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1"
        self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2"
        self.ifMtu = dp + "1.3.6.1.2.1.2.2.1.4"
        self.ifSpeed = dp + "1.3.6.1.2.1.2.2.1.5"
        self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6"
        self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7"
        self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8"
        self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18"
        self.ifInDiscards = dp + "1.3.6.1.2.1.2.2.1.13"
        self.ifOutDiscards = dp + "1.3.6.1.2.1.2.2.1.19"
        self.ifInErrors = dp + "1.3.6.1.2.1.2.2.1.14"
        self.ifOutErrors = dp + "1.3.6.1.2.1.2.2.1.20"
        self.ifHCInOctets = dp + "1.3.6.1.2.1.31.1.1.1.6"
        self.ifHCOutOctets = dp + "1.3.6.1.2.1.31.1.1.1.10"
        self.ifInUcastPkts = dp + "1.3.6.1.2.1.2.2.1.11"
        self.ifOutUcastPkts = dp + "1.3.6.1.2.1.2.2.1.17"

        # IP-MIB address table columns
        self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1"
        self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2"
        self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3"

        # Dell private MIB: stack-unit CPU utilisation (5 second average)
        self.ChStackUnitCpuUtil5sec = dp + "1.3.6.1.4.1.6027.3.10.1.2.9.1.2.1"
+
+
def decode_hex(hexstring):
    """Decode an SNMP octet-string rendered as '0x...' into raw characters.

    Anything too short to carry a 0x prefix, or without one, is returned
    unchanged. (Uses the Python 2 str.decode('hex') codec, like the rest
    of this module.)
    """
    if len(hexstring) < 3 or hexstring[:2] != "0x":
        return hexstring
    return hexstring[2:].decode("hex")
+
def decode_mac(hexstring):
    """Strip the '0x' prefix from a 14-character hex-rendered MAC address.

    Values of any other length, or without the prefix, pass through as-is.
    """
    if len(hexstring) != 14 or hexstring[:2] != "0x":
        return hexstring
    return hexstring[2:]
+
def lookup_adminstatus(int_adminstatus):
    """Map an IF-MIB ifAdminStatus integer to its symbolic name.

    Returns '' for any value outside the known range.
    """
    adminstatus_options = {
        1: 'up',
        2: 'down',
        3: 'testing'
    }
    # dict.get replaces the 'in d.keys()' membership test plus second
    # lookup with a single idiomatic lookup.
    return adminstatus_options.get(int_adminstatus, "")
+
def lookup_operstatus(int_operstatus):
    """Map an IF-MIB ifOperStatus integer to its symbolic name.

    Returns '' for any value outside the known range.
    """
    operstatus_options = {
        1: 'up',
        2: 'down',
        3: 'testing',
        4: 'unknown',
        5: 'dormant',
        6: 'notPresent',
        7: 'lowerLayerDown'
    }
    # dict.get replaces the 'in d.keys()' membership test plus second
    # lookup with a single idiomatic lookup.
    return operstatus_options.get(int_operstatus, "")
+
def decode_type(module, current_oid, val):
    """Convert a pysnmp ASN.1 value into a native Python value.

    Fails the Ansible module (and therefore never returns) when the value
    is missing/empty or its ASN.1 tag is not in the map below.
    NOTE: uses the Python 2 'long' builtin, consistent with the rest of
    this module being Python 2 code.
    """
    # ASN.1 tag -> Python constructor used to coerce the pysnmp object.
    tagMap = {
        rfc1902.Counter32.tagSet: long,
        rfc1902.Gauge32.tagSet: long,
        rfc1902.Integer32.tagSet: long,
        rfc1902.IpAddress.tagSet: str,
        univ.Null.tagSet: str,
        univ.ObjectIdentifier.tagSet: str,
        rfc1902.OctetString.tagSet: str,
        rfc1902.TimeTicks.tagSet: long,
        rfc1902.Counter64.tagSet: long
    }

    # An absent or falsy value means the agent returned nothing usable.
    if val is None or not val:
        module.fail_json(msg="Unable to convert ASN1 type to python type. No value was returned for OID %s" % current_oid)

    try:
        pyVal = tagMap[val.tagSet](val)
    except KeyError as e:
        # Unknown tag: report the offending value rather than crash.
        module.fail_json(msg="KeyError: Unable to convert ASN1 type to python type. Value: %s" % val)

    return pyVal
+
+
def main():
    """Poll a device over SNMP v2/v2c/v3 and return facts.

    Gathers the SNMPv2-MIB system group, the IF-MIB interface table
    (status, counters, descriptions), IP-MIB IPv4 addressing, and -
    for Dell devices - the stack-unit CPU utilisation.
    Fixes over the original: the interface 'speed' entry was populated
    from ifMtu because of a copy-pasted 'if v.ifMtu' test; it now reads
    ifSpeed. The unused interface_indexes accumulator was dropped.
    """
    module = AnsibleModule(
        argument_spec=dict(
            host=dict(required=True),
            version=dict(required=True, choices=['v2', 'v2c', 'v3']),
            community=dict(required=False, default=False),
            username=dict(required=False),
            level=dict(required=False, choices=['authNoPriv', 'authPriv']),
            integrity=dict(required=False, choices=['md5', 'sha']),
            privacy=dict(required=False, choices=['des', 'aes']),
            authkey=dict(required=False),
            privkey=dict(required=False),
            is_dell=dict(required=False, default=False, type='bool'),
            removeplaceholder=dict(required=False)),
        required_together=(['username', 'level', 'integrity', 'authkey'], ['privacy', 'privkey'],),
        supports_check_mode=False)

    m_args = module.params

    if not has_pysnmp:
        module.fail_json(msg='Missing required pysnmp module (check docs)')

    cmdGen = cmdgen.CommandGenerator()

    # Verify that we receive a community when using snmp v2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        if m_args['community'] == False:
            module.fail_json(msg='Community not set when using snmp version 2')

    if m_args['version'] == "v3":
        if m_args['username'] is None:
            module.fail_json(msg='Username not set when using snmp version 3')

        if m_args['level'] == "authPriv" and m_args['privacy'] is None:
            module.fail_json(msg='Privacy algorithm not set when using authPriv')

    if m_args['integrity'] == "sha":
        integrity_proto = cmdgen.usmHMACSHAAuthProtocol
    elif m_args['integrity'] == "md5":
        integrity_proto = cmdgen.usmHMACMD5AuthProtocol

    if m_args['privacy'] == "aes":
        privacy_proto = cmdgen.usmAesCfb128Protocol
    elif m_args['privacy'] == "des":
        privacy_proto = cmdgen.usmDESPrivProtocol

    # Use SNMP Version 2
    if m_args['version'] == "v2" or m_args['version'] == "v2c":
        snmp_auth = cmdgen.CommunityData(m_args['community'])

    # Use SNMP Version 3 with authNoPriv
    elif m_args['level'] == "authNoPriv":
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], authProtocol=integrity_proto)

    # Use SNMP Version 3 with authPriv
    else:
        snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)

    # Use p to prefix OIDs with a dot for polling
    p = DefineOid(dotprefix=True)
    # Use v without a prefix to match the OIDs in returned values
    v = DefineOid(dotprefix=False)

    # Infinitely-nestable dict for assembling the facts tree.
    Tree = lambda: defaultdict(Tree)

    results = Tree()

    # --- SNMPv2-MIB system group (single GET) ---
    errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.sysDescr,),
        cmdgen.MibVariable(p.sysObjectId,),
        cmdgen.MibVariable(p.sysUpTime,),
        cmdgen.MibVariable(p.sysContact,),
        cmdgen.MibVariable(p.sysName,),
        cmdgen.MibVariable(p.sysLocation,),
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    for oid, val in varBinds:
        current_oid = oid.prettyPrint()
        current_val = val.prettyPrint()
        if current_oid == v.sysDescr:
            results['ansible_sysdescr'] = decode_hex(current_val)
        elif current_oid == v.sysObjectId:
            results['ansible_sysobjectid'] = current_val
        elif current_oid == v.sysUpTime:
            results['ansible_sysuptime'] = current_val
        elif current_oid == v.sysContact:
            results['ansible_syscontact'] = current_val
        elif current_oid == v.sysName:
            results['ansible_sysname'] = current_val
        elif current_oid == v.sysLocation:
            results['ansible_syslocation'] = current_val

    # --- IF-MIB interface table and IP-MIB address table (walk) ---
    errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.ifIndex,),
        cmdgen.MibVariable(p.ifDescr,),
        cmdgen.MibVariable(p.ifMtu,),
        cmdgen.MibVariable(p.ifSpeed,),
        cmdgen.MibVariable(p.ifPhysAddress,),
        cmdgen.MibVariable(p.ifAdminStatus,),
        cmdgen.MibVariable(p.ifOperStatus,),
        cmdgen.MibVariable(p.ipAdEntAddr,),
        cmdgen.MibVariable(p.ipAdEntIfIndex,),
        cmdgen.MibVariable(p.ipAdEntNetMask,),
        cmdgen.MibVariable(p.ifAlias,),
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    all_ipv4_addresses = []
    ipv4_networks = Tree()

    for varBinds in varTable:
        for oid, val in varBinds:
            current_oid = oid.prettyPrint()
            current_val = val.prettyPrint()
            if v.ifIndex in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifindex'] = current_val
            if v.ifDescr in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['name'] = current_val
            if v.ifMtu in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['mtu'] = current_val
            # BUG fix: this branch tested v.ifMtu a second time, so 'speed'
            # silently reported the interface MTU instead of ifSpeed.
            if v.ifSpeed in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['speed'] = current_val
            if v.ifPhysAddress in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['mac'] = decode_mac(current_val)
            if v.ifAdminStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val))
            if v.ifOperStatus in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val))
            if v.ipAdEntAddr in current_oid:
                # The last four OID components are the IP address itself.
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['address'] = current_val
                all_ipv4_addresses.append(current_val)
            if v.ipAdEntIfIndex in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['interface'] = current_val
            if v.ipAdEntNetMask in current_oid:
                curIPList = current_oid.rsplit('.', 4)[-4:]
                curIP = ".".join(curIPList)
                ipv4_networks[curIP]['netmask'] = current_val
            if v.ifAlias in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['description'] = current_val

    # --- IF-MIB error/discard/octet/packet counters (walk) ---
    errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
        snmp_auth,
        cmdgen.UdpTransportTarget((m_args['host'], 161)),
        cmdgen.MibVariable(p.ifInDiscards,),
        cmdgen.MibVariable(p.ifOutDiscards,),
        cmdgen.MibVariable(p.ifInErrors,),
        cmdgen.MibVariable(p.ifOutErrors,),
        cmdgen.MibVariable(p.ifHCInOctets,),
        cmdgen.MibVariable(p.ifHCOutOctets,),
        cmdgen.MibVariable(p.ifInUcastPkts,),
        cmdgen.MibVariable(p.ifOutUcastPkts,),
    )

    if errorIndication:
        module.fail_json(msg=str(errorIndication))

    for varBinds in varTable:
        for oid, val in varBinds:
            current_oid = oid.prettyPrint()
            current_val = val.prettyPrint()
            if v.ifInDiscards in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifInDiscards'] = current_val
            if v.ifOutDiscards in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifOutDiscards'] = current_val
            if v.ifInErrors in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifInErrors'] = current_val
            if v.ifOutErrors in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifOutErrors'] = current_val
            if v.ifHCInOctets in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifHCInOctets'] = current_val
            if v.ifHCOutOctets in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifHCOutOctets'] = current_val
            if v.ifInUcastPkts in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifInUcastPkts'] = current_val
            if v.ifOutUcastPkts in current_oid:
                ifIndex = int(current_oid.rsplit('.', 1)[-1])
                results['snmp_interfaces'][ifIndex]['ifOutUcastPkts'] = current_val

    # Group the collected IPv4 networks per interface index.
    interface_to_ipv4 = {}
    for ipv4_network in ipv4_networks:
        current_interface = ipv4_networks[ipv4_network]['interface']
        current_network = {
            'address': ipv4_networks[ipv4_network]['address'],
            'netmask': ipv4_networks[ipv4_network]['netmask']
        }
        interface_to_ipv4.setdefault(current_interface, []).append(current_network)

    for interface in interface_to_ipv4:
        results['snmp_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface]

    results['ansible_all_ipv4_addresses'] = all_ipv4_addresses

    # --- Dell-only: stack-unit CPU utilisation from the private MIB ---
    if m_args['is_dell']:
        errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
            snmp_auth,
            cmdgen.UdpTransportTarget((m_args['host'], 161)),
            cmdgen.MibVariable(p.ChStackUnitCpuUtil5sec,),
        )

        if errorIndication:
            module.fail_json(msg=str(errorIndication))

        for oid, val in varBinds:
            current_oid = oid.prettyPrint()
            current_val = val.prettyPrint()
            if current_oid == v.ChStackUnitCpuUtil5sec:
                results['ansible_ChStackUnitCpuUtil5sec'] = decode_type(module, current_oid, val)

    module.exit_json(ansible_facts=results)
+
+
# Guard the entry point so importing this module has no side effects,
# matching the other library modules in this patch.
if __name__ == "__main__":
    main()
+
+
+
diff --git a/ansible/library/switch_arptable.py b/ansible/library/switch_arptable.py
new file mode 100644
index 00000000000..68e6e0d4e56
--- /dev/null
+++ b/ansible/library/switch_arptable.py
@@ -0,0 +1,65 @@
+#!/usr/bin/python
+
+DOCUMENTATION = '''
+---
+module: switch_arptable
+version_added: "1.9"
+description:
+ Ansible module retrieves arp table from SONiC switch
+ Depends on /sbin/ip neigh
+output:
+ arptable{
+ "v4":{
+ "10.10.1.3":{
+ "interface": "Ethernet68"
+ "state": "STALE"
+ "macaddress": "00:00:00:01:02:03"
+ },
+ },
+ }
+ TODO: IPV6 neighbor table when we test IPV6
+'''
+
+EXAMPLES = '''
+ switch_arptable:
+'''
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+import socket
+import struct
+import re
+import json
+
# Matches a dotted-quad IPv4 address at the start of an 'ip neigh' line.
# Raw string avoids the py3 invalid-escape-sequence warning for '\.'.
v4host = re.compile(r'[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')


def parse_arptable(output):
    """Parse 'ip neigh' output into {'v4': {ip: {interface, state, macaddress}}}.

    Entries without a lladdr field (4 tokens, e.g. FAILED entries) get the
    literal string 'None' as macaddress, as downstream consumers expect.
    """
    v4tbl = {}
    for line in output.split('\n'):
        fields = line.split()
        if not fields or not v4host.match(fields[0]):
            continue
        if len(fields) == 4:
            # e.g. '10.0.0.1 dev eth0 FAILED' - no MAC present
            v4tbl[fields[0]] = {'interface': fields[2], 'state': fields[3], 'macaddress': 'None'}
        elif len(fields) > 4:
            # e.g. '10.0.0.1 dev eth0 lladdr aa:bb:cc:dd:ee:ff REACHABLE'
            v4tbl[fields[0]] = {'interface': fields[2], 'state': fields[5], 'macaddress': fields[4]}
    return {'v4': v4tbl}
+
def main():
    """Collect the kernel IPv4 neighbor table and return it as 'arptable'."""
    module = AnsibleModule(
        argument_spec=dict(),
        supports_check_mode=False)

    rt, out, err = module.run_command("ip neigh")
    if rt != 0:
        # BUG fix: 'self' does not exist in this plain function; the old
        # self.module.fail_json raised NameError instead of reporting the
        # real command failure.
        module.fail_json(msg="Command 'ip neigh' failed rc=%d, out=%s, err=%s" % (rt, out, err))

    arp_tbl = parse_arptable(out)

    if arp_tbl is None:
        module.fail_json(msg="Parse arp table failed??")

    module.exit_json(changed=False, arptable=arp_tbl)
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible/library/switch_tables.py b/ansible/library/switch_tables.py
new file mode 100644
index 00000000000..d9c46e96372
--- /dev/null
+++ b/ansible/library/switch_tables.py
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+
+# Note:
+# Do not use 'env python' in shebang because Ansbile parses it straightforwardly and try to
+# replace it with var ansible_python_interpreter. We exploit this var to implement docker exec support.
+#
+# ref: https://github.com/ansible/ansible/blob/devel/lib/ansible/executor/module_common.py
+
+DOCUMENTATION = '''
+---
+module: switch_tables
+version_added: "1.9"
+short_description: Retrieve layer 3 tables
+description:
+ - Retrieve route, neighbor, nexthop, nexthopgroup table from ACS device
+
+ Table format:
+ results[route][prefix] = nhid/nhgid
+ results[neighbor][ip] = mac
+ results[nexthop][ip] = nhid
+ results[nexthopgroup][nhgid] = [nhids]
+'''
+
+EXAMPLES = '''
+# Retrieve l3table and egress table
+- name: Get ASIC l3table and egress table
+ switch_tables: l3table=yes egress=yes asic=broadcom
+'''
+
+from collections import defaultdict
+import json
+import re
+import socket
+
+
+
+# MELLANOX SECTION #
+####################
+
def general_parse_log(output, keyword):
    """Fold an SDK dump into one string per record starting with 'keyword'.

    Continuation lines (those not starting with the keyword) are squeezed
    of whitespace, have their first ':' turned into '=' (except for
    'neighbor' records, whose tokens already use '='), and are appended
    to the most recent record.
    """
    # Renamed from 'list', which shadowed the builtin.
    records = []
    for line in output.split('\n'):
        line += " "
        if line.split(" ")[0].strip() == keyword:
            records.append(line)
        else:
            line = line.replace(" ", "")
            if keyword != "neighbor":
                line = line.replace(":", "=", 1)
            line += " "
            if records:
                records[-1] = records[-1] + line
    return records
+
+
def convert_hex_to_ip(ip):
    """Convert a hex string (e.g. '0x01020304') to dotted-quad notation.

    The most significant byte becomes the first octet.
    """
    value = int(ip, 16)
    octets = [(value >> shift) & 0xff for shift in (24, 16, 8, 0)]
    return "%d.%d.%d.%d" % tuple(octets)
+
+
def parse_neighbors(output):
    """Build {ipv4_address: mac} from 'neighbor' records in an SDK dump.

    Relies on general_parse_log() having folded each multi-line record
    into a single string of 'key=value' tokens.
    """
    table = {}
    neighbours = general_parse_log(output, "neighbor")
    for neigh in neighbours:
        mac = 0  # fallback when no mac_addr token is present
        neigh_attributes = []
        # Strip the fixed record header/trailer - offset 12 assumed to
        # match this dump format; TODO confirm against real output.
        neigh = neigh[12:-1]
        for word in neigh.split(" "):
            neigh_attributes.append(word.strip())
        # First pass finds the MAC; second pass keys the table by the
        # IPv4 address (s_addr is hex, converted to dotted quad).
        for item in neigh_attributes:
            if item.split("=")[0].strip() == "mac_addr":
                mac = item.split("=")[1].strip()
        for item in neigh_attributes:
            if item.split("=")[0].strip() == "s_addr":
                table[convert_hex_to_ip(item.split("=")[1].strip())] = mac
    return table
+
+
def parse_ecmp_id(output):
    """Return the list of next-hop IPv4 addresses from an ECMP dump."""
    ecmp_nh = general_parse_log(output, "next")
    nh_list = []
    for ecmp in ecmp_nh:
        ecmp_attributes = []
        # Strip the fixed record header/trailer - offset 11 assumed to
        # match this dump format; TODO confirm against real output.
        ecmp = ecmp[11:-1]
        for word in ecmp.split(" "):
            ecmp_attributes.append(word.strip())
        # The third token is expected to carry the hex next-hop address
        # as 'key=<hex ip>'; TODO confirm field position.
        nh_list.append(convert_hex_to_ip(ecmp_attributes[2].split("=")[1].strip()))
    return nh_list
+
+
def main():
    """Collect route/neighbor/nexthop/nexthopgroup tables from the ASIC.

    Only the Mellanox SDK dump scripts are implemented; asic=broadcom
    fails immediately. Requested tables are returned as ansible facts.
    BUG fix: three failure paths called self.module.fail_json from this
    plain function, which raised NameError instead of reporting the error.
    """
    module = AnsibleModule(
        argument_spec=dict(
            asic=dict(required=True, choices=['mellanox', 'broadcom']),
            route=dict(required=False, default=False, choices=BOOLEANS),
            neighbor=dict(required=False, default=False, choices=BOOLEANS),
            nexthop=dict(required=False, default=False, choices=BOOLEANS),
            nexthopgroup=dict(required=False, default=False, choices=BOOLEANS)),
        supports_check_mode=True)

    results = dict()

    if module.params['asic'] == 'broadcom':
        module.fail_json(msg="Broadcom support missing.")

    if module.params['asic'] == 'mellanox':

        rc, out, err = module.run_command("/usr/local/bin/sx_api_router_uc_routes_dump_all.py")
        if rc != 0:
            module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                             (rc, out, err))
        routes_table = {}
        nhg_table = {}
        ecmp_ids = []
        routes = general_parse_log(out, "route")
        for route_attr in routes:
            attributes = []
            is_prefix = None
            route_attr = route_attr[9:-1]  # strip record header/trailer
            for word in route_attr.split(" "):
                attributes.append(word)
            # Re-join a nexthoplist whose value got split across tokens.
            if attributes[6].split("=")[0].strip() == "nexthoplist" and attributes[7].split("=")[0].strip() == "hop0":
                attributes[6:10] = [''.join(attributes[6:10])]
            for word in route_attr.split(" "):
                if word.split("=")[0].strip() == "s_addr" and is_prefix is None:
                    # First s_addr token is the route prefix itself.
                    route = convert_hex_to_ip(word.split("=")[1])
                    is_prefix = True
                elif word.split("=")[0].strip() == "type" and word.split("=")[1].strip() == "NEXT_HOP":
                    for attr in attributes:
                        if attr.split("=")[0].strip() == "nexthoplist" and attr.split("=")[1].strip() != "":
                            routes_table[route] = convert_hex_to_ip(attr.split("=")[4].strip())

                        elif attr.split("=")[0].strip() == "ecmp_id" and attr.split("=")[1].strip() != "0":
                            routes_table[route] = attr.split("=")[1].strip()
                            if attr.split("=")[1].strip() not in ecmp_ids:
                                ecmp_ids.append(attr.split("=")[1].strip())

        # Resolve each ECMP group into its member next hops.
        for ecmp_id in ecmp_ids:  # renamed from 'id' (shadowed the builtin)
            path = "/usr/local/bin/sx_api_router_ecmp_dump.py " + str(ecmp_id)
            rc, out, err = module.run_command(path)
            if rc != 0:
                module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                                 (rc, out, err))
            nhg_table[ecmp_id] = parse_ecmp_id(out)

        rc, out, err = module.run_command("/usr/local/bin/sx_api_router_neigh_dump.py")
        if rc != 0:
            module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
                             (rc, out, err))
        neighbors_table = parse_neighbors(out)

        if module.params['neighbor']:
            results['neighbor'] = neighbors_table

        if module.params['nexthopgroup']:
            results['nexthopgroup'] = nhg_table

        if module.params['nexthop']:
            results['nexthop'] = dict()
            # Routes resolved directly (not via an ECMP group)...
            for ip in routes_table:
                if routes_table[ip] not in nhg_table:
                    results['nexthop'][ip] = routes_table[ip]
            # ...plus every member of every ECMP group.
            for nh in nhg_table:
                for ip in nhg_table[nh]:
                    results['nexthop'][ip] = ip

        if module.params['route']:
            results['route'] = routes_table

    module.exit_json(ansible_facts=results)
+
+
+from ansible.module_utils.basic import *
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible/library/syslog_server.py b/ansible/library/syslog_server.py
new file mode 100644
index 00000000000..9f5575b8b55
--- /dev/null
+++ b/ansible/library/syslog_server.py
@@ -0,0 +1,86 @@
+#!/usr/bin/python
+
+DOCUMENTATION = '''
+---
+module: syslog_server
+version_added: "1.0"
+author: John Arnold (johnar@microsoft.com)
+short_description: Receive Syslog messages
+description:
+ - Start a Syslog listener, receive syslog messages and return them.
+options:
+'''
+
+EXAMPLES = '''
+# Receive Syslog messages
+- name: Receive Syslog Messages
+ syslog_server:
+'''
+
+from ansible.module_utils.basic import *
+from collections import defaultdict
+import socket
+import struct
+import re
+import json
+import time
+import SocketServer
+import threading
+
+
+#HOST, PORT = "0.0.0.0", 5514
+
+queuedOutput = []
+
+
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
    # UDP server that dispatches each received datagram to its own thread.
    pass
+
+
class ThreadedUDPRequestHandler(SocketServer.BaseRequestHandler):
    """Handle one received UDP datagram: timestamp it and queue it."""

    def handle(self):
        # For UDP servers self.request is (data, socket); only the payload
        # is needed here. The old unused 'socket = self.request[1]' local
        # shadowed the imported socket module and has been removed.
        data = bytes.decode(self.request[0].strip())

        newLogString = "%s %s %s\n" % (time.time(), self.client_address[0], data)

        global queuedOutput
        queuedOutput.append(newLogString)
+
def main():
    """Run a threaded UDP syslog listener for 'timeout' seconds and return
    the collected messages as the 'syslog_messages' fact."""
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(required=False, default=30),
            port=dict(required=False, default=5514),
            host=dict(required=False, default="0.0.0.0")
        ),
        supports_check_mode=False)

    args = module.params

    try:
        # BUG fix: allow_reuse_address must be set on the class before the
        # server is constructed - the socket is bound inside __init__, so
        # assigning the attribute on the instance afterwards had no effect.
        ThreadedUDPServer.allow_reuse_address = True
        server = ThreadedUDPServer((args['host'], int(args['port'])), ThreadedUDPRequestHandler)

        server_thread = threading.Thread(target=server.serve_forever)
        server_thread.daemon = True
        server_thread.start()

        # Collect messages for the requested window, then stop the listener
        # and release the UDP socket (shutdown alone does not close it).
        time.sleep(float(args['timeout']))
        server.shutdown()
        server.server_close()

    except Exception as e:
        module.fail_json(msg=str(e))

    Tree = lambda: defaultdict(Tree)
    results = Tree()

    global queuedOutput
    results['syslog_messages'] = queuedOutput

    module.exit_json(ansible_facts=results)
+
+if __name__ == "__main__":
+ main()
+
diff --git a/ansible/roles/test/files/acstests/acs_base_test.py b/ansible/roles/test/files/acstests/acs_base_test.py
new file mode 100644
index 00000000000..b77680006bd
--- /dev/null
+++ b/ansible/roles/test/files/acstests/acs_base_test.py
@@ -0,0 +1,41 @@
+"""
+Base classes for test cases
+
+Tests will usually inherit from one of these classes to have the controller
+and/or dataplane automatically set up.
+"""
+
+import ptf
+from ptf.base_tests import BaseTest
+from ptf import config
+import ptf.testutils as testutils
+
+################################################################
+#
+# Thrift interface base tests
+#
+################################################################
+
+class ACSDataplaneTest(BaseTest):
+
+ def setUp(self):
+ BaseTest.setUp(self)
+
+ self.test_params = testutils.test_params_get()
+ print "You specified the following test-params when invoking ptf:"
+ print self.test_params
+
+ # shows how to use a filter on all our tests
+ testutils.add_filter(testutils.not_ipv6_filter)
+
+ self.dataplane = ptf.dataplane_instance
+ self.dataplane.flush()
+ if config["log_dir"] != None:
+ filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
+ self.dataplane.start_pcap(filename)
+
+ def tearDown(self):
+ if config["log_dir"] != None:
+ self.dataplane.stop_pcap()
+ testutils.reset_filters()
+ BaseTest.tearDown(self)
diff --git a/ansible/roles/test/files/acstests/arptest.py b/ansible/roles/test/files/acstests/arptest.py
new file mode 100644
index 00000000000..ff1742148a0
--- /dev/null
+++ b/ansible/roles/test/files/acstests/arptest.py
@@ -0,0 +1,158 @@
+'''
+Test correct kernel ARP behavior
+'''
+
+import ptf.packet as scapy
+import ptf.dataplane as dataplane
+import acs_base_test
+from ptf.testutils import *
+from ptf.mask import Mask
+
+class ExpectReply(acs_base_test.ACSDataplaneTest):
+    '''
+    Test correct ARP behavior, make sure ACS is replying ARP request for local interface IP address
+    ACS switch should reply ARP and update ARP table entry to correct peer MAC address
+    '''
+    def runTest(self):
+        acs_mac = self.test_params['acs_mac']
+        # Broadcast who-has for the switch's local interface IP (10.10.1.2),
+        # sent from the directly connected peer 10.10.1.3 on port 1.
+        pkt = simple_arp_packet(pktlen=60,
+                                eth_dst='ff:ff:ff:ff:ff:ff',
+                                eth_src='00:06:07:08:09:0a',
+                                vlan_vid=0,
+                                vlan_pcp=0,
+                                arp_op=1,
+                                ip_snd='10.10.1.3',
+                                ip_tgt='10.10.1.2',
+                                hw_snd='00:06:07:08:09:0a',
+                                hw_tgt='ff:ff:ff:ff:ff:ff',
+                                )
+        # Expected unicast ARP reply (op=2) sourced from the switch MAC.
+        exp_pkt = simple_arp_packet(eth_dst='00:06:07:08:09:0a',
+                                    eth_src=acs_mac,
+                                    arp_op=2,
+                                    ip_snd='10.10.1.2',
+                                    ip_tgt='10.10.1.3',
+                                    hw_tgt='00:06:07:08:09:0a',
+                                    hw_snd=acs_mac,
+                                    )
+        send_packet(self, 1, pkt)
+        verify_packet(self, exp_pkt, 1)
+
+class WrongIntNoReply(acs_base_test.ACSDataplaneTest):
+    '''
+    Test ARP packet from other(wrong) interface with dest IP address as local interface IP address
+    ACS should not reply to such ARP request
+    '''
+    def runTest(self):
+        acs_mac = self.test_params['acs_mac']
+        # Request targets 10.10.1.2, but is injected on port 2, which is not
+        # the interface that owns that address.
+        pkt = simple_arp_packet(pktlen=60,
+                                eth_dst='ff:ff:ff:ff:ff:ff',
+                                eth_src='00:02:07:08:09:0a',
+                                vlan_vid=0,
+                                vlan_pcp=0,
+                                arp_op=1,
+                                ip_snd='10.10.1.4',
+                                ip_tgt='10.10.1.2',
+                                hw_snd='00:02:07:08:09:0a',
+                                hw_tgt='ff:ff:ff:ff:ff:ff',
+                                )
+        # The reply that must NOT appear on any port.
+        exp_pkt = simple_arp_packet(eth_dst='00:02:07:08:09:0a',
+                                    eth_src=acs_mac,
+                                    arp_op=2,
+                                    ip_snd='10.10.1.2',
+                                    ip_tgt='10.10.1.4',
+                                    hw_tgt='00:02:07:08:09:0a',
+                                    hw_snd=acs_mac,
+                                    )
+        send_packet(self, 2, pkt)
+        ports = ptf_ports()
+        # Negative check across every ptf port, not just the injection port.
+        verify_no_packet_any(self, exp_pkt, ports)
+
+class SrcOutRangeNoReply(acs_base_test.ACSDataplaneTest):
+    '''
+    Test incoming ARP request src IP address is not within local interface subnet, even the destination address match
+    ACS should not reply such ARP request and should not add ARP table entry either
+    '''
+    def runTest(self):
+        acs_mac = self.test_params['acs_mac']
+        # Sender 10.10.1.22 is outside the interface's subnet, target matches.
+        pkt = simple_arp_packet(pktlen=60,
+                                eth_dst='ff:ff:ff:ff:ff:ff',
+                                eth_src='00:03:07:08:09:0a',
+                                vlan_vid=0,
+                                vlan_pcp=0,
+                                arp_op=1,
+                                ip_snd='10.10.1.22',
+                                ip_tgt='10.10.1.2',
+                                hw_snd='00:03:07:08:09:0a',
+                                hw_tgt='ff:ff:ff:ff:ff:ff',
+                                )
+        # NOTE(review): ip_snd/ip_tgt here (10.10.1.22 -> 10.10.1.20) do not
+        # mirror the request above (a reply would be 10.10.1.2 -> 10.10.1.22).
+        # Since this is a negative check it still passes, but confirm the
+        # intended expected-packet values.
+        exp_pkt = simple_arp_packet(eth_dst='00:03:07:08:09:0a',
+                                    eth_src=acs_mac,
+                                    arp_op=2,
+                                    ip_snd='10.10.1.22',
+                                    ip_tgt='10.10.1.20',
+                                    hw_tgt='00:03:07:08:09:0a',
+                                    hw_snd=acs_mac,
+                                    )
+        send_packet(self, 1, pkt)
+        verify_no_packet(self, exp_pkt, 1)
+
+class GarpNoUpdate(acs_base_test.ACSDataplaneTest):
+    '''
+    When receiving gratuitous ARP packet, if it was not resolved in ARP table before,
+    ACS should discard the request and won't add ARP entry for the GARP
+    '''
+    def runTest(self):
+        acs_mac = self.test_params['acs_mac']
+        # Gratuitous ARP: sender and target IP are the same (10.10.1.7),
+        # an address not previously resolved by the switch.
+        pkt = simple_arp_packet(pktlen=60,
+                                eth_dst='ff:ff:ff:ff:ff:ff',
+                                eth_src='00:05:07:08:09:0a',
+                                vlan_vid=0,
+                                vlan_pcp=0,
+                                arp_op=1,
+                                ip_snd='10.10.1.7',
+                                ip_tgt='10.10.1.7',
+                                hw_snd='00:05:07:08:09:0a',
+                                hw_tgt='ff:ff:ff:ff:ff:ff',
+                                )
+        exp_pkt = simple_arp_packet(eth_dst='00:05:07:08:09:0a',
+                                    eth_src=acs_mac,
+                                    arp_op=2,
+                                    ip_snd='10.10.1.2',
+                                    ip_tgt='10.10.1.7',
+                                    hw_tgt='00:05:07:08:09:0a',
+                                    hw_snd=acs_mac,
+                                    )
+        send_packet(self, 1, pkt)
+        # A GARP must not be answered.
+        verify_no_packet(self, exp_pkt, 1)
+
+
+class GarpUpdate(acs_base_test.ACSDataplaneTest):
+    '''
+    When receiving gratuitous ARP packet, if it was resolved in ARP table before,
+    ACS should update ARP entry with new mac
+    '''
+    def runTest(self):
+        acs_mac = self.test_params['acs_mac']
+        # GARP for 10.10.1.3 (already resolved) announcing a new MAC.
+        pkt = simple_arp_packet(pktlen=60,
+                                eth_dst='ff:ff:ff:ff:ff:ff',
+                                eth_src='00:00:07:08:09:0a',
+                                vlan_vid=0,
+                                vlan_pcp=0,
+                                arp_op=1,
+                                ip_snd='10.10.1.3',
+                                ip_tgt='10.10.1.3',
+                                hw_snd='00:00:07:08:09:0a',
+                                hw_tgt='ff:ff:ff:ff:ff:ff',
+                                )
+        exp_pkt = simple_arp_packet(eth_dst='00:00:07:08:09:0a',
+                                    eth_src=acs_mac,
+                                    arp_op=2,
+                                    ip_snd='10.10.1.2',
+                                    ip_tgt='10.10.1.3',
+                                    hw_tgt='00:00:07:08:09:0a',
+                                    hw_snd=acs_mac,
+                                    )
+        send_packet(self, 1, pkt)
+        # NOTE(review): this only verifies that no reply is sent; the ARP
+        # table update promised by the docstring is presumably checked
+        # outside ptf (e.g. by the ansible play) — confirm.
+        verify_no_packet(self, exp_pkt, 1)
+
+
diff --git a/ansible/roles/test/files/acstests/dscp_ecn_send.py b/ansible/roles/test/files/acstests/dscp_ecn_send.py
new file mode 100644
index 00000000000..2de7e2ca96e
--- /dev/null
+++ b/ansible/roles/test/files/acstests/dscp_ecn_send.py
@@ -0,0 +1,41 @@
+"""
+ACS Dataplane Qos tests
+"""
+
+import time
+import logging
+import ptf.packet as scapy
+
+import ptf.dataplane as dataplane
+import acs_base_test
+
+from ptf.testutils import *
+from ptf.mask import Mask
+
+class DscpEcnSend(acs_base_test.ACSDataplaneTest):
+    """Send a burst of TCP packets with a given DSCP/ECN marking, then count
+    any packets that leaked back to port 1 of device 0.
+
+    Test params: router_mac, dscp, ecn, packet_num, optional ip_src/ip_dst.
+    """
+    def runTest(self):
+
+        src_mac = [None, None]
+        src_mac[0] = self.dataplane.get_mac(0, 0)
+
+        router_mac = self.test_params['router_mac']
+        dscp = self.test_params['dscp']
+        # TOS byte = DSCP in the upper 6 bits, ECN in the lower 2.
+        tos = dscp << 2
+        tos |= self.test_params['ecn']
+        ip_src = '10.0.0.1' if 'ip_src' not in self.test_params else self.test_params['ip_src']
+        ip_dst = '10.0.0.3' if 'ip_dst' not in self.test_params else self.test_params['ip_dst']
+        for i in range(0, self.test_params['packet_num']):
+            pkt = simple_tcp_packet(eth_dst=router_mac,
+                                    eth_src=src_mac[0],
+                                    ip_src=ip_src,
+                                    ip_dst=ip_dst,
+                                    ip_tos=tos,
+                                    ip_id=i,  # ip_id doubles as a sequence number
+                                    ip_ttl=64)
+            send_packet(self, 0, pkt)
+
+        # Drain whatever came back on (device 0, port 1) and report the count.
+        leaking_pkt_number = 0
+        for (rcv_port_number, pkt_str, pkt_time) in self.dataplane.packets(0, 1):
+            leaking_pkt_number += 1
+
+        print "leaking packet %d" % leaking_pkt_number
diff --git a/ansible/roles/test/files/acstests/dscp_mapping.py b/ansible/roles/test/files/acstests/dscp_mapping.py
new file mode 100644
index 00000000000..e665bfa1e3a
--- /dev/null
+++ b/ansible/roles/test/files/acstests/dscp_mapping.py
@@ -0,0 +1,62 @@
+"""
+ACS Dataplane Qos tests
+"""
+
+import time
+import logging
+import ptf.packet as scapy
+
+import ptf.dataplane as dataplane
+import acs_base_test
+
+from ptf.testutils import *
+from ptf.mask import Mask
+
+class ArpPopulate(acs_base_test.ACSDataplaneTest):
+    """Prime the switch's ARP table by sending one ARP request per ptf port.
+
+    Uses the /31 point-to-point scheme: the ptf side owns the odd address
+    10.0.0.(2n+1) and the switch owns the even address 10.0.0.(2n).
+    """
+    def runTest(self):
+
+        router_mac = self.test_params['router_mac']
+
+        index = 0
+        for port in ptf_ports():
+            arpreq_pkt = simple_arp_packet(
+                eth_dst='ff:ff:ff:ff:ff:ff',
+                eth_src=self.dataplane.get_mac(port[0], port[1]),
+                arp_op=1,
+                ip_snd='10.0.0.%d' % (index * 2 + 1),
+                ip_tgt='10.0.0.%d' % (index * 2),
+                hw_snd=self.dataplane.get_mac(port[0], port[1]),
+                hw_tgt='ff:ff:ff:ff:ff:ff')
+            send_packet(self, port[1], arpreq_pkt)
+            index += 1
+
+class DscpMappingTest(acs_base_test.ACSDataplaneTest):
+    """Verify L3 forwarding preserves the DSCP marking for all 64 code points.
+
+    For each DSCP value, sends a TCP packet into port 0 addressed to the
+    router MAC and expects it routed out port 1 with the same TOS byte and
+    the TTL decremented by one.
+    """
+    def runTest(self):
+
+        router_mac = self.test_params['router_mac']
+
+        src_mac = [None, None]
+        src_mac[0] = self.dataplane.get_mac(0, 0)
+        src_mac[1] = self.dataplane.get_mac(0, 1)
+
+        for dscp in range(0, 64):
+            print "Sending L3 packet port 0 -> port 1, dscp %d" % dscp
+            tos = dscp << 2  # ECN bits left at 0
+            pkt = simple_tcp_packet(eth_dst=router_mac,
+                                    eth_src=src_mac[0],
+                                    ip_src='10.0.0.1',
+                                    ip_dst='10.0.0.3',
+                                    ip_tos=tos,
+                                    ip_id=101,
+                                    ip_ttl=64)
+
+            exp_pkt = simple_tcp_packet(eth_dst=src_mac[1],
+                                        eth_src=router_mac,
+                                        ip_src='10.0.0.1',
+                                        ip_dst='10.0.0.3',
+                                        ip_tos=tos,
+                                        ip_id=101,
+                                        ip_ttl=63)  # routed: TTL decremented
+
+            send_packet(self, 0, pkt)
+            verify_packets(self, exp_pkt, ports=[1])
diff --git a/ansible/roles/test/files/helpers/add_ip.sh b/ansible/roles/test/files/helpers/add_ip.sh
new file mode 100755
index 00000000000..28319253caa
--- /dev/null
+++ b/ansible/roles/test/files/helpers/add_ip.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Assign the /31 point-to-point test addresses: eth<i> gets 10.0.0.<2i+1>/31
+# (the odd side of each pair; the switch owns the even address).
+# Undo with remove_ip.sh.
+
+set -e
+
+for i in $(seq 0 31); do
+    last_el=$((1+i*2))
+    ip address add 10.0.0.$last_el/31 dev eth$i
+done
diff --git a/ansible/roles/test/files/helpers/add_routes.sh b/ansible/roles/test/files/helpers/add_routes.sh
new file mode 100755
index 00000000000..84d949fa026
--- /dev/null
+++ b/ansible/roles/test/files/helpers/add_routes.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+for j in `seq 0 15`; do
+ if ! (route | grep "172.16.$j.0"); then
+ cmd="ip route add 172.16.$j.0/24 nexthop via 10.0.0.33 "
+ for i in `seq 1 15`; do
+ cmd+="nexthop via 10.0.0.$((32+2*$i+1)) "
+ done
+ $cmd
+ fi
+done
diff --git a/ansible/roles/test/files/helpers/remove_ip.sh b/ansible/roles/test/files/helpers/remove_ip.sh
new file mode 100755
index 00000000000..02ddffd7ec6
--- /dev/null
+++ b/ansible/roles/test/files/helpers/remove_ip.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Remove the addresses configured by add_ip.sh.  Note this flushes EVERY
+# address on eth0..eth31, not only the 10.0.0.x/31 test addresses.
+
+set -e
+
+for i in $(seq 0 31); do
+    ip address flush dev eth$i
+done
diff --git a/ansible/roles/test/files/helpers/remove_routes.sh b/ansible/roles/test/files/helpers/remove_routes.sh
new file mode 100755
index 00000000000..3e28a7b24e7
--- /dev/null
+++ b/ansible/roles/test/files/helpers/remove_routes.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+# Remove the 16 ECMP test routes installed by add_routes.sh.  The `ip route
+# del` command must name the same next-hop set that was added, so the
+# next-hop list is rebuilt identically here.
+
+set -e
+
+# $(...) command substitution for consistency with add_ip.sh/remove_ip.sh
+for j in $(seq 0 15); do
+    if (route | grep "172.16.$j.0"); then
+        cmd="ip route del 172.16.$j.0/24 nexthop via 10.0.0.33 "
+        for i in $(seq 1 15); do
+            cmd+="nexthop via 10.0.0.$((32+2*$i+1)) "
+        done
+        $cmd
+    fi
+done
diff --git a/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini b/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini
new file mode 100644
index 00000000000..8ca7d1a774d
--- /dev/null
+++ b/ansible/roles/test/files/mlnx/default_interface_to_front_map.ini
@@ -0,0 +1,33 @@
+# ptf host interface @ switch front port name
+0@Ethernet1
+1@Ethernet2
+2@Ethernet3
+3@Ethernet4
+4@Ethernet5
+5@Ethernet6
+6@Ethernet7
+7@Ethernet8
+8@Ethernet9
+9@Ethernet10
+10@Ethernet11
+11@Ethernet12
+12@Ethernet13
+13@Ethernet14
+14@Ethernet15
+15@Ethernet16
+16@Ethernet17
+17@Ethernet18
+18@Ethernet19
+19@Ethernet20
+20@Ethernet21
+21@Ethernet22
+22@Ethernet23
+23@Ethernet24
+24@Ethernet25
+25@Ethernet26
+26@Ethernet27
+27@Ethernet28
+28@Ethernet29
+29@Ethernet30
+30@Ethernet31
+31@Ethernet32
\ No newline at end of file
diff --git a/ansible/roles/test/files/saitests/copp_tests.py b/ansible/roles/test/files/saitests/copp_tests.py
new file mode 100644
index 00000000000..b45fe188298
--- /dev/null
+++ b/ansible/roles/test/files/saitests/copp_tests.py
@@ -0,0 +1,349 @@
+# ptf --test-dir saitests copp_tests --qlen=10000 --platform nn -t "verbose=True;dst_mac='00:02:03:04:05:00'" --device-socket 0-3@tcp://127.0.0.1:10900 --device-socket 1-3@tcp://10.3.147.47:10900
+#
+# copp_test.${name_test}
+#
+# ARPTest
+# DHCPTest
+# LLDPTest
+# BGPTest
+# LACPTest
+# SNMPTest
+# SSHTest
+# IP2METest
+# DefaultTest
+
+import ptf
+from ptf.base_tests import BaseTest
+from ptf import config
+import ptf.testutils as testutils
+from ptf.testutils import *
+from ptf.dataplane import match_exp_pkt
+import datetime
+import subprocess
+
+
+class ControlPlaneBaseTest(BaseTest):
+ MAX_PORTS = 32
+ PPS_LIMIT = 600
+ PPS_LIMIT_MIN = PPS_LIMIT * 0.9
+ PPS_LIMIT_MAX = PPS_LIMIT * 1.1
+ NO_POLICER_LIMIT = PPS_LIMIT * 1.4
+ PKT_TX_COUNT = 5000
+ PKT_RX_LIMIT = PKT_TX_COUNT * 0.90
+
+ def __init__(self):
+ BaseTest.__init__(self)
+ self.test_params = testutils.test_params_get()
+
+ self.mac_map = []
+ for i in xrange(self.MAX_PORTS):
+ output = ControlPlaneBaseTest.cmd_run('ip link show dev eth%d' % (i))
+ second = output.split('\n')[1]
+ mac = second.split()[1]
+ self.mac_map.append(mac)
+
+ self.myip = {}
+ self.peerip = {}
+ for i in xrange(self.MAX_PORTS):
+ self.myip[i] = "10.0.0.%d" % (i*2+1)
+ self.peerip[i] = "10.0.0.%d" % (i*2)
+
+ return
+
+ @staticmethod
+ def cmd_run(cmdline):
+ cmd = cmdline.split(' ')
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout, stderr = process.communicate()
+ ret_code = process.returncode
+
+ if ret_code != 0:
+ raise Exception("ret_code=%d, error message=%s. cmd=%s" % (ret_code, stderr, cmdline))
+
+ return stdout
+
+ def setUp(self):
+ self.dataplane = ptf.dataplane_instance
+ self.dataplane.flush()
+ if config["log_dir"] != None:
+ filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
+ self.dataplane.start_pcap(filename)
+
+ def tearDown(self):
+ if config["log_dir"] != None:
+ self.dataplane.stop_pcap()
+
+ def copp_test(self, packet, count, send_intf, recv_intf):
+ start_time=datetime.datetime.now()
+
+ for i in xrange(count):
+ testutils.send_packet(self, send_intf, packet)
+
+ end_time=datetime.datetime.now()
+
+ total_rcv_pkt_cnt = 0
+ while True:
+ (rcv_device, rcv_port, rcv_pkt, pkt_time) = dp_poll(self, device_number=recv_intf[0], port_number=recv_intf[1], timeout=1)
+ if rcv_pkt is not None:
+ if match_exp_pkt(packet, rcv_pkt):
+ total_rcv_pkt_cnt += 1
+ else:
+ break
+
+ time_delta = end_time - start_time
+ time_delta_ms = (time_delta.microseconds + time_delta.seconds * 10**6) / 10**3
+ tx_pps = int(count/(float(time_delta_ms)/1000))
+ rx_pps = int(total_rcv_pkt_cnt/(float(time_delta_ms)/1000))
+
+ return total_rcv_pkt_cnt, time_delta, time_delta_ms, tx_pps, rx_pps
+
+ def contruct_packet(self, port_number):
+ raise NotImplemented
+
+ def check_constraints(self, total_rcv_pkt_cnt, time_delta_ms, rx_pps):
+ raise NotImplemented
+
+ def one_port_test(self, port_number):
+ packet = self.contruct_packet(port_number)
+ total_rcv_pkt_cnt, time_delta, time_delta_ms, tx_pps, rx_pps = self.copp_test(packet, self.PKT_TX_COUNT, (0, port_number), (1, port_number))
+ self.printStats(self.PKT_TX_COUNT, total_rcv_pkt_cnt, time_delta, tx_pps, rx_pps)
+ self.check_constraints(total_rcv_pkt_cnt, time_delta_ms, rx_pps)
+
+ return
+
+ def run_suite(self):
+ self.one_port_test(3)
+
+ def printStats(self, pkt_send_count, total_rcv_pkt_cnt, time_delta, tx_pps, rx_pps):
+ if not(('verbose' in self.test_params) and (self.test_params['verbose'] == True)):
+ return
+ print 'test stats'
+ print 'Packet sent = %10d' % pkt_send_count
+ print 'Packet rcvd = %10d' % total_rcv_pkt_cnt
+ print 'Test time = %s' % str(time_delta)
+ print 'TX PPS = %d' % tx_pps
+ print 'RX PPS = %d' % rx_pps
+
+ return
+
+class NoPolicyTest(ControlPlaneBaseTest):
+    """Base for traps without a policer: nearly all packets must get through."""
+    def __init__(self):
+        ControlPlaneBaseTest.__init__(self)
+
+    def check_constraints(self, total_rcv_pkt_cnt, time_delta_ms, rx_pps):
+        # Unpoliced traffic must both sustain a high rate and arrive ~whole.
+        assert(rx_pps > self.NO_POLICER_LIMIT)
+        assert(total_rcv_pkt_cnt > self.PKT_RX_LIMIT)
+
+class PolicyTest(ControlPlaneBaseTest):
+ def __init__(self):
+ ControlPlaneBaseTest.__init__(self)
+
+ def check_constraints(self, total_rcv_pkt_cnt, time_delta_ms, rx_pps):
+ assert(self.PPS_LIMIT_MIN <= rx_pps <= self.PPS_LIMIT_MAX)
+ expected_packets = rx_pps*time_delta_ms/1000
+
+
+# SONIC config contains policer CIR=600 for ARP
+class ARPTest(PolicyTest):
+    """ARP broadcast requests; policed at CIR=600 in the SONIC config."""
+    def __init__(self):
+        PolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        src_mac = self.mac_map[port_number]
+        src_ip = self.myip[port_number]
+        dst_ip = self.peerip[port_number]
+
+        # Broadcast who-has for the switch-side address of this port's /31.
+        packet = simple_arp_packet(
+            eth_dst='ff:ff:ff:ff:ff:ff',
+            eth_src=src_mac,
+            arp_op=1,
+            ip_snd=src_ip,
+            ip_tgt=dst_ip,
+            hw_snd=src_mac,
+            hw_tgt='ff:ff:ff:ff:ff:ff')
+
+        return packet
+
+# SONIC configuration has no policer limiting for DHCP
+class DHCPTest(NoPolicyTest):
+    """DHCP discover traffic; no policer in the SONIC configuration."""
+    def __init__(self):
+        NoPolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        src_mac = self.mac_map[port_number]
+        # DHCPDISCOVER-shaped UDP broadcast: 0.0.0.0 -> 255.255.255.255, 68 -> 67.
+        packet = simple_udp_packet(pktlen=100,
+                                   eth_dst='ff:ff:ff:ff:ff:ff',
+                                   eth_src=src_mac,
+                                   dl_vlan_enable=False,
+                                   vlan_vid=0,
+                                   vlan_pcp=0,
+                                   dl_vlan_cfi=0,
+                                   ip_src='0.0.0.0',
+                                   ip_dst='255.255.255.255',
+                                   ip_tos=0,
+                                   ip_ttl=64,
+                                   udp_sport=68,
+                                   udp_dport=67,
+                                   ip_ihl=None,
+                                   ip_options=False,
+                                   with_udp_chksum=True
+                                   )
+
+        return packet
+
+
+# SONIC configuration has no policer limiting for LLDP
+class LLDPTest(NoPolicyTest):
+    """LLDP frames; no policer in the SONIC configuration."""
+    def __init__(self):
+        NoPolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        src_mac = self.mac_map[port_number]
+        # LLDP multicast destination + ethertype 0x88cc.
+        packet = simple_eth_packet(
+            eth_dst='01:80:c2:00:00:0e',
+            eth_src=src_mac,
+            eth_type=0x88cc
+        )
+
+        return packet
+
+# SONIC configuration has no policer limiting for BGP
+class BGPTest(NoPolicyTest):
+    """TCP to BGP port 179 addressed to the switch; no policer configured."""
+    def __init__(self):
+        NoPolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        dst_mac = self.test_params['dst_mac']
+        dst_ip = self.peerip[port_number]
+        packet = simple_tcp_packet(
+            eth_dst=dst_mac,
+            ip_dst=dst_ip,
+            tcp_dport=179  # BGP
+        )
+        return packet
+
+# SONIC configuration has no policer limiting for LACP
+class LACPTest(NoPolicyTest):
+    """LACP frames; no policer in the SONIC configuration."""
+    def __init__(self):
+        NoPolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        # Slow-protocols multicast + ethertype 0x8809, followed by the two
+        # bytes 0x01 0x01 (LACP subtype and version).
+        packet = simple_eth_packet(
+            pktlen=14,
+            eth_dst='01:80:c2:00:00:02',
+            eth_type=0x8809
+        ) / (chr(0x01)+(chr(0x01)))
+
+        return packet
+
+# SNMP packets are trapped as IP2ME packets.
+# IP2ME configuration in SONIC contains policer CIR=600
+class SNMPTest(PolicyTest): #FIXME: trapped as ip2me. mellanox should add support for SNMP trap
+    """UDP to SNMP port 161; currently hits the IP2ME policer (CIR=600)."""
+    def __init__(self):
+        PolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        src_mac = self.mac_map[port_number]
+        dst_mac = self.test_params['dst_mac']
+        dst_ip = self.peerip[port_number]
+        packet = simple_udp_packet(
+            eth_dst=dst_mac,
+            ip_dst=dst_ip,
+            eth_src=src_mac,
+            udp_dport=161  # SNMP
+        )
+        return packet
+
+# SSH is now policed in the SONIC configuration (see FIXME on the class below)
+class SSHTest(PolicyTest): # FIXME: ssh is policed now
+    """TCP to/from port 22 addressed to the switch; subject to the policer."""
+    def __init__(self):
+        PolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        dst_mac = self.test_params['dst_mac']
+        src_ip = self.myip[port_number]
+        dst_ip = self.peerip[port_number]
+
+        packet = simple_tcp_packet(
+            eth_dst=dst_mac,
+            ip_dst=dst_ip,
+            ip_src=src_ip,
+            tcp_sport=22,
+            tcp_dport=22)
+
+        return packet
+
+# IP2ME configuration in SONIC contains policer CIR=600
+class IP2METest(PolicyTest):
+    """Generic IP-to-me traffic; IP2ME policer is CIR=600 in SONIC."""
+    def __init__(self):
+        PolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def one_port_test(self, port_number):
+        # Unlike the base class, cycle the packet's src MAC / dst IP through
+        # every port while always injecting on the same physical port.
+        for i in xrange(self.MAX_PORTS):
+            packet = self.contruct_packet(i)
+            total_rcv_pkt_cnt, time_delta, time_delta_ms, tx_pps, rx_pps = self.copp_test(packet, self.PKT_TX_COUNT, (0, port_number), (1, port_number))
+            self.printStats(self.PKT_TX_COUNT, total_rcv_pkt_cnt, time_delta, tx_pps, rx_pps)
+            self.check_constraints(total_rcv_pkt_cnt, time_delta_ms, rx_pps)
+
+        return
+
+    def contruct_packet(self, port_number):
+        src_mac = self.mac_map[port_number]
+        dst_mac = self.test_params['dst_mac']
+        dst_ip = self.peerip[port_number]
+
+        packet = simple_tcp_packet(
+            eth_src=src_mac,
+            eth_dst=dst_mac,
+            ip_dst=dst_ip
+        )
+
+        return packet
+
+
+class DefaultTest(PolicyTest):
+    """Transit-looking traffic that still reaches the CPU (TTL=1); policed."""
+    def __init__(self):
+        PolicyTest.__init__(self)
+
+    def runTest(self):
+        self.run_suite()
+
+    def contruct_packet(self, port_number):
+        dst_mac = self.test_params['dst_mac']
+        src_ip = self.myip[port_number]
+        # Destination belongs to a DIFFERENT port so the packet looks routed.
+        dst_port_number = (port_number + 1) % self.MAX_PORTS
+        dst_ip = self.peerip[dst_port_number]
+
+        packet = simple_tcp_packet(
+            eth_dst=dst_mac,
+            ip_dst=dst_ip,
+            ip_src=src_ip,
+            tcp_sport=10000,
+            tcp_dport=10000,
+            ip_ttl=1)  # TTL=1 forces a trap to the CPU
+
+        return packet
diff --git a/ansible/roles/test/files/saitests/ecmp_test.py b/ansible/roles/test/files/saitests/ecmp_test.py
new file mode 100644
index 00000000000..f5902651977
--- /dev/null
+++ b/ansible/roles/test/files/saitests/ecmp_test.py
@@ -0,0 +1,102 @@
+"""
+SONiC Dataplane ECMP tests
+"""
+import random
+import time
+import logging
+import ptf.packet as scapy
+import socket
+import ptf.dataplane as dataplane
+import sai_base_test
+from ptf.testutils import *
+from ptf.mask import Mask
+from switch import *
+
+# Constants
+IP_LAST_WORD_RANGE = 254
+IP_2ND_LAST_WORD_RANGE = 16
+NUMBER_OF_SRC_PORTS = 16
+NUMBER_OF_DST_PORTS = 16
+
+class ECMPtest(sai_base_test.ThriftInterfaceDataPlane):
+ def runTest(self):
+ """
+ For SONiC
+ testing ECMP uniformn distribution over 16 RIFs from 16 differnt ports
+ ---- Test require 32 connected ports and SONiC up and running ----
+ ---- MUST RUN WITH "--relax" option on the ptf running line ----
+ Preliminary steps
+ 1. Configure IPs of all of the connected ports
+ (SONiC will configure neighbors)
+ 2. configure all routes (run add_routes.sh on SONiC)
+
+ Test structure
+ Sending Packets sequance
+ 1. Main loop running on 16 sources ports
+ 2. IP_LAST_WORD_RANGE loop running from 0-254
+ 3. IP_2ND_LAST_WORD_RANGE loop running 0-15
+ 4. Inside loop, to increase the number of packets, with differnt ports
+ 5. Sending and reciving packets, and counting destanation ports
+ - Final counters checking for uniform distribution
+
+ Final steps
+ For cleaning configuration run remove_routes.sh from SONiC
+ """
+ switch_init(self.client)
+ random.seed(1)
+ #init vars
+ sport = 0x1234
+ dport = 0x50
+ router_mac = self.test_params['router_mac']
+ destanation_ports = range(NUMBER_OF_SRC_PORTS,NUMBER_OF_DST_PORTS+NUMBER_OF_SRC_PORTS)
+ pkt_counter = [0]*32
+ logging.debug("the router mac is ")
+ logging.debug( router_mac)
+ logging.debug("the rif macs are")
+ for i in range(16): logging.debug( self.dataplane.get_mac(0, i+16))
+ #send packets
+ for port in xrange(NUMBER_OF_SRC_PORTS):
+ for i in xrange(IP_LAST_WORD_RANGE):
+ for j in xrange(IP_2ND_LAST_WORD_RANGE):
+ ip_src = '10.0.0.' + str(port * 2 + 32)
+ src_mac = self.dataplane.get_mac(0, 0)
+ ip_dst = '172.16.' + str(j) + '.' + str(i + 1)
+
+ pkt = simple_tcp_packet(
+ eth_dst=router_mac,
+ eth_src=src_mac,
+ ip_src=ip_src,
+ ip_dst=ip_dst,
+ ip_id=i,
+ tcp_sport=sport,
+ tcp_dport=dport,
+ ip_ttl=64)
+ exp_pkt = simple_tcp_packet(
+ eth_dst=self.dataplane.get_mac(0, 16),
+ eth_src=router_mac,
+ ip_src=ip_src,
+ ip_dst=ip_dst,
+ ip_id=i,
+ tcp_sport=sport,
+ tcp_dport=dport,
+ ip_ttl=63)
+ masked_exp_pkt = Mask(exp_pkt)
+ masked_exp_pkt.set_do_not_care_scapy(scapy.Ether,"dst")
+
+ send_packet(self, port, pkt)
+ (match_index,rcv_pkt) = verify_packet_any_port(self,masked_exp_pkt,destanation_ports)
+ logging.debug("found expected packet from port %d" % destanation_ports[match_index])
+ pkt_counter[match_index] += 1
+ sport = random.randint(0,0xffff)
+ dport = random.randint(0,0xffff)
+
+ #final uniform distribution check
+ for stat_port in xrange(NUMBER_OF_DST_PORTS):
+ logging.debug( "PORT #"+str(hex(port_list[stat_port+NUMBER_OF_SRC_PORTS]))+":")
+ logging.debug(str(pkt_counter[stat_port]))
+ self.assertTrue((pkt_counter[stat_port ] >= ((IP_LAST_WORD_RANGE * IP_2ND_LAST_WORD_RANGE) * 0.9)),
+ "Not all paths are equally balanced, %s" % pkt_counter[stat_port+NUMBER_OF_SRC_PORTS])
+ self.assertTrue((pkt_counter[stat_port ] <= ((IP_LAST_WORD_RANGE * IP_2ND_LAST_WORD_RANGE) * 1.1)),
+ "Not all paths are equally balanced, %s" % pkt_counter[stat_port+NUMBER_OF_SRC_PORTS])
+ print "END OF TEST"
+
diff --git a/ansible/roles/test/files/saitests/sai_base_test.py b/ansible/roles/test/files/saitests/sai_base_test.py
new file mode 100644
index 00000000000..3c0fb1daa5a
--- /dev/null
+++ b/ansible/roles/test/files/saitests/sai_base_test.py
@@ -0,0 +1,91 @@
+"""
+Base classes for test cases
+
+Tests will usually inherit from one of these classes to have the controller
+and/or dataplane automatically set up.
+"""
+
+import os
+import logging
+import unittest
+
+
+import ptf
+from ptf.base_tests import BaseTest
+from ptf import config
+import ptf.dataplane as dataplane
+import ptf.testutils as testutils
+
+################################################################
+#
+# Thrift interface base tests
+#
+################################################################
+
+import switch_sai_thrift.switch_sai_rpc as switch_sai_rpc
+from thrift.transport import TSocket
+from thrift.transport import TTransport
+from thrift.protocol import TBinaryProtocol
+
+interface_to_front_mapping = {}
+
+class ThriftInterface(BaseTest):
+
+ def setUp(self):
+ global interface_to_front_mapping
+
+ BaseTest.setUp(self)
+
+ self.test_params = testutils.test_params_get()
+ if self.test_params.has_key("server"):
+ server = self.test_params['server']
+ else:
+ server = 'localhost'
+
+ if self.test_params.has_key("port_map"):
+ user_input = self.test_params['port_map']
+ splitted_map = user_input.split(",")
+ for item in splitted_map:
+ interface_front_pair = item.split("@")
+ interface_to_front_mapping[interface_front_pair[0]] = interface_front_pair[1]
+ elif self.test_params.has_key("port_map_file"):
+ user_input = self.test_params['port_map_file']
+ f = open(user_input, 'r')
+ for line in f:
+ if (len(line) > 0 and (line[0] == '#' or line[0] == ';' or line[0]=='/')):
+ continue;
+ interface_front_pair = line.split("@")
+ interface_to_front_mapping[interface_front_pair[0]] = interface_front_pair[1].strip()
+ else:
+ exit("No ptf interface<-> switch front port mapping, please specify as parameter or in external file")
+
+ # Set up thrift client and contact server
+ self.transport = TSocket.TSocket(server, 9092)
+ self.transport = TTransport.TBufferedTransport(self.transport)
+ self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
+
+ self.client = switch_sai_rpc.Client(self.protocol)
+ self.transport.open()
+
+ def tearDown(self):
+ if config["log_dir"] != None:
+ self.dataplane.stop_pcap()
+ BaseTest.tearDown(self)
+ self.transport.close()
+
+class ThriftInterfaceDataPlane(ThriftInterface):
+    """
+    Root class that sets up the thrift interface and dataplane
+    """
+    def setUp(self):
+        ThriftInterface.setUp(self)
+        self.dataplane = ptf.dataplane_instance
+        self.dataplane.flush()
+        # Capture per-test pcap only when ptf was started with --log-dir.
+        if config["log_dir"] != None:
+            filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
+            self.dataplane.start_pcap(filename)
+
+    def tearDown(self):
+        # NOTE(review): ensure pcap is not also stopped in the parent class's
+        # tearDown, otherwise stop_pcap runs twice per test — verify.
+        if config["log_dir"] != None:
+            self.dataplane.stop_pcap()
+        ThriftInterface.tearDown(self)
diff --git a/ansible/roles/test/files/saitests/switch.py b/ansible/roles/test/files/saitests/switch.py
new file mode 100644
index 00000000000..a5fb00a8989
--- /dev/null
+++ b/ansible/roles/test/files/saitests/switch.py
@@ -0,0 +1,676 @@
+# Copyright 2013-present Barefoot Networks, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Thrift SAI interface basic tests
+"""
+
+import switch_sai_thrift
+from sai_base_test import *
+import time
+import sys
+import logging
+
+import unittest
+import random
+
+import sai_base_test
+
+from ptf import config
+from ptf.testutils import *
+from ptf.thriftutils import *
+
+import os
+
+from switch_sai_thrift.ttypes import *
+
+from switch_sai_thrift.sai_headers import *
+
+
+this_dir = os.path.dirname(os.path.abspath(__file__))
+
+# Module-level state shared by all tests in this file.
+switch_inited=0  # set to 1 by switch_init() after one-time switch setup
+port_list = {}  # ptf interface index -> SAI port object id (filled by switch_init)
+sai_port_list = []  # SAI port object ids reported by the switch
+front_port_list = []  # SAI port ids ordered by front-panel port
+table_attr_list = []
+router_mac='00:77:66:55:44:00'
+rewrite_mac1='00:77:66:55:45:01'
+rewrite_mac2='00:77:66:55:46:01'
+
+# True when running against the bmv2 software switch (BMV2_TEST=1 in env)
+is_bmv2 = ('BMV2_TEST' in os.environ) and (int(os.environ['BMV2_TEST']) == 1)
+
+def switch_init(client):
+ global switch_inited
+ if switch_inited:
+ return
+
+ switch_attr_list = client.sai_thrift_get_switch_attribute()
+ attr_list = switch_attr_list.attr_list
+ for attribute in attr_list:
+ if attribute.id == SAI_SWITCH_ATTR_PORT_NUMBER:
+ print "max ports: " + attribute.value.u32
+ elif attribute.id == SAI_SWITCH_ATTR_PORT_LIST:
+ for port_id in attribute.value.objlist.object_id_list:
+ attr_value = sai_thrift_attribute_value_t(booldata=1)
+ attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_ADMIN_STATE, value=attr_value)
+ client.sai_thrift_set_port_attribute(port_id, attr)
+ sai_port_list.append(port_id)
+ else:
+ print "unknown switch attribute"
+
+ attr_value = sai_thrift_attribute_value_t(mac='00:77:66:55:44:33')
+ attr = sai_thrift_attribute_t(id=SAI_SWITCH_ATTR_SRC_MAC_ADDRESS, value=attr_value)
+ client.sai_thrift_set_switch_attribute(attr)
+
+ # wait till the port are up
+ time.sleep(10)
+
+ thrift_attr = client.sai_thrift_get_port_list_by_front_port()
+ if thrift_attr.id == SAI_SWITCH_ATTR_PORT_LIST:
+ for port_id in thrift_attr.value.objlist.object_id_list:
+ front_port_list.append(port_id)
+
+ for interface,front in interface_to_front_mapping.iteritems():
+ sai_port_id = client.sai_thrift_get_port_id_by_front_port(front);
+ port_list[int(interface)]=sai_port_id
+
+ switch_inited = 1
+
+
+def sai_thrift_create_fdb(client, vlan_id, mac, port, mac_action):
+ fdb_entry = sai_thrift_fdb_entry_t(mac_address=mac, vlan_id=vlan_id)
+ #value 0 represents static entry, id=0, represents entry type
+ fdb_attribute1_value = sai_thrift_attribute_value_t(s32=SAI_FDB_ENTRY_STATIC)
+ fdb_attribute1 = sai_thrift_attribute_t(id=SAI_FDB_ENTRY_ATTR_TYPE,
+ value=fdb_attribute1_value)
+ #value oid represents object id, id=1 represents port id
+ fdb_attribute2_value = sai_thrift_attribute_value_t(oid=port)
+ fdb_attribute2 = sai_thrift_attribute_t(id=SAI_FDB_ENTRY_ATTR_PORT_ID,
+ value=fdb_attribute2_value)
+ #value oid represents object id, id=1 represents port id
+ fdb_attribute3_value = sai_thrift_attribute_value_t(s32=mac_action)
+ fdb_attribute3 = sai_thrift_attribute_t(id=SAI_FDB_ENTRY_ATTR_PACKET_ACTION,
+ value=fdb_attribute3_value)
+ fdb_attr_list = [fdb_attribute1, fdb_attribute2, fdb_attribute3]
+ client.sai_thrift_create_fdb_entry(thrift_fdb_entry=fdb_entry, thrift_attr_list=fdb_attr_list)
+
+def sai_thrift_delete_fdb(client, vlan_id, mac, port):
+ fdb_entry = sai_thrift_fdb_entry_t(mac_address=mac, vlan_id=vlan_id)
+ client.sai_thrift_delete_fdb_entry(thrift_fdb_entry=fdb_entry)
+
+def sai_thrift_flush_fdb_by_vlan(client, vlan_id):
+ fdb_attribute1_value = sai_thrift_attribute_value_t(u16=vlan_id)
+ fdb_attribute1 = sai_thrift_attribute_t(id=SAI_FDB_FLUSH_ATTR_VLAN_ID,
+ value=fdb_attribute1_value)
+ fdb_attribute2_value = sai_thrift_attribute_value_t(s32=SAI_FDB_FLUSH_ENTRY_DYNAMIC)
+ fdb_attribute2 = sai_thrift_attribute_t(id=SAI_FDB_FLUSH_ATTR_ENTRY_TYPE,
+ value=fdb_attribute2_value)
+ fdb_attr_list = [fdb_attribute1, fdb_attribute2]
+ client.sai_thrift_flush_fdb_entries(thrift_attr_list=fdb_attr_list)
+
+def sai_thrift_create_virtual_router(client, v4_enabled, v6_enabled):
+ #v4 enabled
+ vr_attribute1_value = sai_thrift_attribute_value_t(booldata=v4_enabled)
+ vr_attribute1 = sai_thrift_attribute_t(id=SAI_VIRTUAL_ROUTER_ATTR_ADMIN_V4_STATE,
+ value=vr_attribute1_value)
+ #v6 enabled
+ vr_attribute2_value = sai_thrift_attribute_value_t(booldata=v6_enabled)
+ vr_attribute2 = sai_thrift_attribute_t(id=SAI_VIRTUAL_ROUTER_ATTR_ADMIN_V6_STATE,
+ value=vr_attribute1_value)
+ vr_attr_list = [vr_attribute1, vr_attribute2]
+ vr_id = client.sai_thrift_create_virtual_router(thrift_attr_list=vr_attr_list)
+ return vr_id
+
+def sai_thrift_create_router_interface(client, vr_id, is_port, port_id, vlan_id, v4_enabled, v6_enabled, mac):
+    """Create a router interface in virtual router vr_id and return its oid.
+
+    When is_port is truthy the RIF is bound to port_id (vlan_id ignored);
+    otherwise it is a VLAN RIF bound to vlan_id (port_id ignored).  A source
+    MAC override is programmed only when `mac` is truthy.
+    """
+    #vrf attribute
+    rif_attr_list = []
+    rif_attribute1_value = sai_thrift_attribute_value_t(oid=vr_id)
+    rif_attribute1 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID,
+                                            value=rif_attribute1_value)
+    rif_attr_list.append(rif_attribute1)
+    if is_port:
+        #port type and port id
+        rif_attribute2_value = sai_thrift_attribute_value_t(s32=SAI_ROUTER_INTERFACE_TYPE_PORT)
+        rif_attribute2 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_TYPE,
+                                                value=rif_attribute2_value)
+        rif_attr_list.append(rif_attribute2)
+        rif_attribute3_value = sai_thrift_attribute_value_t(oid=port_id)
+        rif_attribute3 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_PORT_ID,
+                                                value=rif_attribute3_value)
+        rif_attr_list.append(rif_attribute3)
+    else:
+        #vlan type and vlan id
+        rif_attribute2_value = sai_thrift_attribute_value_t(s32=SAI_ROUTER_INTERFACE_TYPE_VLAN)
+        rif_attribute2 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_TYPE,
+                                                value=rif_attribute2_value)
+        rif_attr_list.append(rif_attribute2)
+        rif_attribute3_value = sai_thrift_attribute_value_t(u16=vlan_id)
+        rif_attribute3 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_VLAN_ID,
+                                                value=rif_attribute3_value)
+        rif_attr_list.append(rif_attribute3)
+
+    #v4_enabled
+    rif_attribute4_value = sai_thrift_attribute_value_t(booldata=v4_enabled)
+    rif_attribute4 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_ADMIN_V4_STATE,
+                                            value=rif_attribute4_value)
+    rif_attr_list.append(rif_attribute4)
+    #v6_enabled
+    rif_attribute5_value = sai_thrift_attribute_value_t(booldata=v6_enabled)
+    rif_attribute5 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_ADMIN_V6_STATE,
+                                            value=rif_attribute5_value)
+    rif_attr_list.append(rif_attribute5)
+
+    # optional source MAC override (skipped for falsy mac, e.g. None or '')
+    if mac:
+        rif_attribute6_value = sai_thrift_attribute_value_t(mac=mac)
+        rif_attribute6 = sai_thrift_attribute_t(id=SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS,
+                                                value=rif_attribute6_value)
+        rif_attr_list.append(rif_attribute6)
+
+    rif_id = client.sai_thrift_create_router_interface(rif_attr_list)
+    return rif_id
+
+def sai_thrift_create_route(client, vr_id, addr_family, ip_addr, ip_mask, nhop):
+ if addr_family == SAI_IP_ADDR_FAMILY_IPV4:
+ addr = sai_thrift_ip_t(ip4=ip_addr)
+ mask = sai_thrift_ip_t(ip4=ip_mask)
+ ip_prefix = sai_thrift_ip_prefix_t(addr_family=SAI_IP_ADDR_FAMILY_IPV4, addr=addr, mask=mask)
+ else:
+ addr = sai_thrift_ip_t(ip6=ip_addr)
+ mask = sai_thrift_ip_t(ip6=ip_mask)
+ ip_prefix = sai_thrift_ip_prefix_t(addr_family=SAI_IP_ADDR_FAMILY_IPV6, addr=addr, mask=mask)
+ route_attribute1_value = sai_thrift_attribute_value_t(oid=nhop)
+ route_attribute1 = sai_thrift_attribute_t(id=SAI_ROUTE_ATTR_NEXT_HOP_ID,
+ value=route_attribute1_value)
+ route = sai_thrift_unicast_route_entry_t(vr_id, ip_prefix)
+ route_attr_list = [route_attribute1]
+ client.sai_thrift_create_route(thrift_unicast_route_entry=route, thrift_attr_list=route_attr_list)
+
+def sai_thrift_remove_route(client, vr_id, addr_family, ip_addr, ip_mask, nhop):
+ if addr_family == SAI_IP_ADDR_FAMILY_IPV4:
+ addr = sai_thrift_ip_t(ip4=ip_addr)
+ mask = sai_thrift_ip_t(ip4=ip_mask)
+ ip_prefix = sai_thrift_ip_prefix_t(addr_family=SAI_IP_ADDR_FAMILY_IPV4, addr=addr, mask=mask)
+ else:
+ addr = sai_thrift_ip_t(ip6=ip_addr)
+ mask = sai_thrift_ip_t(ip6=ip_mask)
+ ip_prefix = sai_thrift_ip_prefix_t(addr_family=SAI_IP_ADDR_FAMILY_IPV6, addr=addr, mask=mask)
+ route = sai_thrift_unicast_route_entry_t(vr_id, ip_prefix)
+ client.sai_thrift_remove_route(thrift_unicast_route_entry=route)
+
+def sai_thrift_create_nhop(client, addr_family, ip_addr, rif_id):
+ if addr_family == SAI_IP_ADDR_FAMILY_IPV4:
+ addr = sai_thrift_ip_t(ip4=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV4, addr=addr)
+ else:
+ addr = sai_thrift_ip_t(ip6=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV6, addr=addr)
+ nhop_attribute1_value = sai_thrift_attribute_value_t(ipaddr=ipaddr)
+ nhop_attribute1 = sai_thrift_attribute_t(id=SAI_NEXT_HOP_ATTR_IP,
+ value=nhop_attribute1_value)
+ nhop_attribute2_value = sai_thrift_attribute_value_t(oid=rif_id)
+ nhop_attribute2 = sai_thrift_attribute_t(id=SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID,
+ value=nhop_attribute2_value)
+ nhop_attribute3_value = sai_thrift_attribute_value_t(s32=SAI_NEXT_HOP_IP)
+ nhop_attribute3 = sai_thrift_attribute_t(id=SAI_NEXT_HOP_ATTR_TYPE,
+ value=nhop_attribute3_value)
+ nhop_attr_list = [nhop_attribute1, nhop_attribute2, nhop_attribute3]
+ nhop = client.sai_thrift_create_next_hop(thrift_attr_list=nhop_attr_list)
+ return nhop
+
+def sai_thrift_create_neighbor(client, addr_family, rif_id, ip_addr, dmac):
+ if addr_family == SAI_IP_ADDR_FAMILY_IPV4:
+ addr = sai_thrift_ip_t(ip4=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV4, addr=addr)
+ else:
+ addr = sai_thrift_ip_t(ip6=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV6, addr=addr)
+ neighbor_attribute1_value = sai_thrift_attribute_value_t(mac=dmac)
+ neighbor_attribute1 = sai_thrift_attribute_t(id=SAI_NEIGHBOR_ATTR_DST_MAC_ADDRESS,
+ value=neighbor_attribute1_value)
+ neighbor_attr_list = [neighbor_attribute1]
+ neighbor_entry = sai_thrift_neighbor_entry_t(rif_id=rif_id, ip_address=ipaddr)
+ client.sai_thrift_create_neighbor_entry(neighbor_entry, neighbor_attr_list)
+
+def sai_thrift_remove_neighbor(client, addr_family, rif_id, ip_addr, dmac):
+ if addr_family == SAI_IP_ADDR_FAMILY_IPV4:
+ addr = sai_thrift_ip_t(ip4=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV4, addr=addr)
+ else:
+ addr = sai_thrift_ip_t(ip6=ip_addr)
+ ipaddr = sai_thrift_ip_address_t(addr_family=SAI_IP_ADDR_FAMILY_IPV6, addr=addr)
+ neighbor_entry = sai_thrift_neighbor_entry_t(rif_id=rif_id, ip_address=ipaddr)
+ client.sai_thrift_remove_neighbor_entry(neighbor_entry)
+
+def sai_thrift_create_next_hop_group(client, nhop_list):
+ nhop_group_attribute1_value = sai_thrift_attribute_value_t(s32=SAI_NEXT_HOP_GROUP_ECMP)
+ nhop_group_attribute1 = sai_thrift_attribute_t(id=SAI_NEXT_HOP_GROUP_ATTR_TYPE,
+ value=nhop_group_attribute1_value)
+ nhop_objlist = sai_thrift_object_list_t(count=len(nhop_list), object_id_list=nhop_list)
+ nhop_group_attribute2_value = sai_thrift_attribute_value_t(objlist=nhop_objlist)
+ nhop_group_attribute2 = sai_thrift_attribute_t(id=SAI_NEXT_HOP_GROUP_ATTR_NEXT_HOP_LIST,
+ value=nhop_group_attribute2_value)
+ nhop_group_attr_list = [nhop_group_attribute1, nhop_group_attribute2]
+ nhop_group = client.sai_thrift_create_next_hop_group(thrift_attr_list=nhop_group_attr_list)
+ return nhop_group
+
+def sai_thrift_create_lag(client, port_list):
+ lag_port_list = sai_thrift_object_list_t(count=len(port_list), object_id_list=port_list)
+ lag1_attr_value = sai_thrift_attribute_value_t(objlist=lag_port_list)
+ lag1_attr = sai_thrift_attribute_t(id=SAI_LAG_ATTR_PORT_LIST,
+ value=lag1_attr_value)
+ lag_attr_list = [lag1_attr]
+ lag = client.sai_thrift_create_lag(lag_attr_list)
+ return lag
+
+def sai_thrift_create_lag_member(client, lag_id, port_id):
+ lag_member_attr1_value = sai_thrift_attribute_value_t(oid=lag_id)
+ lag_member_attr1 = sai_thrift_attribute_t(id=SAI_LAG_MEMBER_ATTR_LAG_ID,
+ value=lag_member_attr1_value)
+ lag_member_attr2_value = sai_thrift_attribute_value_t(oid=port_id)
+ lag_member_attr2 = sai_thrift_attribute_t(id=SAI_LAG_MEMBER_ATTR_PORT_ID,
+ value=lag_member_attr2_value)
+ lag_member_attr_list = [lag_member_attr1, lag_member_attr2]
+ lag_member_id = client.sai_thrift_create_lag_member(lag_member_attr_list)
+ return lag_member_id
+
+def sai_thrift_create_stp_entry(client, vlan_list):
+ vlanlist=sai_thrift_vlan_list_t(vlan_count=len(vlan_list), vlan_list=vlan_list)
+ stp_attribute1_value = sai_thrift_attribute_value_t(vlanlist=vlanlist)
+ stp_attribute1 = sai_thrift_attribute_t(id=SAI_STP_ATTR_VLAN_LIST,
+ value=stp_attribute1_value)
+ stp_attr_list = [stp_attribute1]
+ stp_id = client.sai_thrift_create_stp_entry(stp_attr_list)
+ return stp_id
+
+def sai_thrift_create_hostif_trap_group(client, queue_id):
+ attribute_value = sai_thrift_attribute_value_t(u32=queue_id)
+ attribute = sai_thrift_attribute_t(id=SAI_HOSTIF_TRAP_GROUP_ATTR_QUEUE,
+ value=attribute_value)
+ attr_list = [attribute]
+ trap_group_id = client.sai_thrift_create_hostif_trap_group(thrift_attr_list=attr_list)
+ return trap_group_id
+
+def sai_thrift_create_hostif_trap(client, trap_id, action, priority, channel, trap_group_id):
+ attribute3_value = sai_thrift_attribute_value_t(s32=channel)
+ attribute3 = sai_thrift_attribute_t(id=SAI_HOSTIF_TRAP_ATTR_TRAP_CHANNEL,
+ value=attribute3_value)
+ client.sai_thrift_set_hostif_trap(trap_id, attribute3)
+ attribute4_value = sai_thrift_attribute_value_t(oid=trap_group_id)
+ attribute4 = sai_thrift_attribute_t(id=SAI_HOSTIF_TRAP_ATTR_TRAP_GROUP,
+ value=attribute4_value)
+ client.sai_thrift_set_hostif_trap(trap_id, attribute4)
+ attribute1_value = sai_thrift_attribute_value_t(s32=action)
+ attribute1 = sai_thrift_attribute_t(id=SAI_HOSTIF_TRAP_ATTR_PACKET_ACTION,
+ value=attribute1_value)
+ client.sai_thrift_set_hostif_trap(trap_id, attribute1)
+ attribute2_value = sai_thrift_attribute_value_t(u32=priority)
+ attribute2 = sai_thrift_attribute_t(id=SAI_HOSTIF_TRAP_ATTR_TRAP_PRIORITY,
+ value=attribute2_value)
+ client.sai_thrift_set_hostif_trap(trap_id, attribute2)
+
+def sai_thrift_create_hostif(client, rif_or_port_id, intf_name):
+ attribute1_value = sai_thrift_attribute_value_t(s32=SAI_HOSTIF_TYPE_NETDEV)
+ attribute1 = sai_thrift_attribute_t(id=SAI_HOSTIF_ATTR_TYPE,
+ value=attribute1_value)
+ attribute2_value = sai_thrift_attribute_value_t(oid=rif_or_port_id)
+ attribute2 = sai_thrift_attribute_t(id=SAI_HOSTIF_ATTR_RIF_OR_PORT_ID,
+ value=attribute2_value)
+ attribute3_value = sai_thrift_attribute_value_t(chardata=intf_name)
+ attribute3 = sai_thrift_attribute_t(id=SAI_HOSTIF_ATTR_NAME,
+ value=attribute3_value)
+ attr_list = [attribute1, attribute2, attribute3]
+ hif_id = client.sai_thrift_create_hostif(attr_list)
+ return hif_id
+
+def sai_thrift_create_acl_table(client, addr_family,
+ ip_src, ip_dst,
+ ip_proto,
+ in_ports, out_ports,
+ in_port, out_port):
+ acl_attr_list = []
+ if ip_src != None:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_SRC_IP,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if ip_dst != None:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_DST_IP,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if ip_proto != None:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_IP_PROTOCOL,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if in_ports:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_IN_PORTS,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if out_ports:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_OUT_PORTS,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if in_port != None:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_IN_PORT,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+ if out_port != None:
+ attribute_value = sai_thrift_attribute_value_t(booldata=1)
+ attribute = sai_thrift_attribute_t(id=SAI_ACL_TABLE_ATTR_FIELD_OUT_PORT,
+ value=attribute_value)
+ acl_attr_list.append(attribute)
+
+ acl_table_id = client.sai_thrift_create_acl_table(acl_attr_list)
+ return acl_table_id
+
+def sai_thrift_create_acl_entry(client, acl_table_id,
+                                action, addr_family,
+                                ip_src, ip_src_mask,
+                                ip_dst, ip_dst_mask,
+                                ip_proto,
+                                in_port_list, out_port_list,
+                                in_port, out_port,
+                                ingress_mirror, egress_mirror):
+    """Create an ACL entry in acl_table_id and return its oid.
+
+    action == 1 installs a drop; action == 2 installs a mirror action
+    (ingress takes precedence: egress_mirror is only used when
+    ingress_mirror is None).
+
+    NOTE(review): addr_family, ip_dst, ip_dst_mask and ip_proto are accepted
+    but never turned into attributes here -- only the source IP match is
+    built.  Confirm whether DST_IP/IP_PROTOCOL support was intended.
+    """
+    acl_attr_list = []
+
+    #OID
+    attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(oid=acl_table_id)))
+    attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_TABLE_ID,
+                                       value=attribute_value)
+    acl_attr_list.append(attribute)
+
+    #Priority (hard-coded to 10)
+    attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(u32=10)))
+    attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_PRIORITY,
+                                       value=attribute_value)
+    acl_attr_list.append(attribute)
+
+    #Ip source
+    if ip_src != None:
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(ip4=ip_src), mask =sai_thrift_acl_mask_t(ip4=ip_src_mask)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_FIELD_SRC_IP,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+
+    #Input ports
+    if in_port_list:
+        acl_port_list = sai_thrift_object_list_t(count=len(in_port_list), object_id_list=in_port_list)
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(objlist=acl_port_list)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_FIELD_IN_PORTS,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+
+    #Output ports
+    if out_port_list:
+        acl_port_list = sai_thrift_object_list_t(count=len(out_port_list), object_id_list=out_port_list)
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(objlist=acl_port_list)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORTS,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+
+    #Single input port match
+    if in_port != None:
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(oid=in_port)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_FIELD_IN_PORT,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+
+    #Single output port match
+    if out_port != None:
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(oid=out_port)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_FIELD_OUT_PORT,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+
+    #Packet action
+    if action == 1:
+        #Drop
+        attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(u8=0)))
+        attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_PACKET_ACTION,
+                                           value=attribute_value)
+        acl_attr_list.append(attribute)
+    elif action == 2:
+        #Ingress mirroring (takes precedence over egress mirroring)
+        if ingress_mirror != None:
+            attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(oid=ingress_mirror)))
+            attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_INGRESS, value=attribute_value)
+            acl_attr_list.append(attribute)
+        elif egress_mirror != None:
+            attribute_value = sai_thrift_attribute_value_t(aclfield=sai_thrift_acl_field_data_t(data = sai_thrift_acl_data_t(oid=egress_mirror)))
+            attribute = sai_thrift_attribute_t(id=SAI_ACL_ENTRY_ATTR_ACTION_MIRROR_EGRESS, value=attribute_value)
+            acl_attr_list.append(attribute)
+
+    acl_entry_id = client.sai_thrift_create_acl_entry(acl_attr_list)
+    return acl_entry_id
+
+def sai_thrift_create_mirror_session(client, mirror_type, port,
+ vlan, vlan_priority, vlan_tpid,
+ src_mac, dst_mac,
+ addr_family, src_ip, dst_ip,
+ encap_type, protocol, ttl, tos):
+ mirror_attr_list = []
+
+ #Mirror type
+ attribute1_value = sai_thrift_attribute_value_t(u8=mirror_type)
+ attribute1 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_TYPE,
+ value=attribute1_value)
+ mirror_attr_list.append(attribute1)
+
+ #Monitor port
+ attribute2_value = sai_thrift_attribute_value_t(oid=port)
+ attribute2 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_MONITOR_PORT,
+ value=attribute2_value)
+ mirror_attr_list.append(attribute2)
+
+ if mirror_type == SAI_MIRROR_TYPE_LOCAL:
+ attribute4_value = sai_thrift_attribute_value_t(u16=vlan)
+ attribute4 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_VLAN_ID,
+ value=attribute4_value)
+ mirror_attr_list.append(attribute4)
+ elif mirror_type == SAI_MIRROR_TYPE_REMOTE:
+ #vlan tpid
+ attribute3_value = sai_thrift_attribute_value_t(u16=vlan_tpid)
+ attribute3 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_VLAN_TPID,
+ value=attribute3_value)
+ mirror_attr_list.append(attribute3)
+
+ #vlan
+ attribute4_value = sai_thrift_attribute_value_t(u16=vlan)
+ attribute4 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_VLAN_ID,
+ value=attribute4_value)
+ mirror_attr_list.append(attribute4)
+
+ #vlan priority
+ attribute5_value = sai_thrift_attribute_value_t(u16=vlan_priority)
+ attribute4 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_VLAN_PRI,
+ value=attribute5_value)
+ mirror_attr_list.append(attribute5)
+ elif mirror_type == SAI_MIRROR_TYPE_ENHANCED_REMOTE:
+ #encap type
+ attribute3_value = sai_thrift_attribute_value_t(u8=encap_type)
+ attribute3 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_ENCAP_TYPE,
+ value=attribute3_value)
+ mirror_attr_list.append(attribute3)
+
+ #source ip
+ addr = sai_thrift_ip_t(ip4=src_ip)
+ src_ip_addr = sai_thrift_ip_address_t(addr_family=addr_family, addr=addr)
+ attribute4_value = sai_thrift_attribute_value_t(ipaddr=src_ip_addr)
+ attribute4 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_SRC_IP_ADDRESS,
+ value=attribute4_value)
+ mirror_attr_list.append(attribute4)
+
+ #dst ip
+ addr = sai_thrift_ip_t(ip4=dst_ip)
+ dst_ip_addr = sai_thrift_ip_address_t(addr_family=addr_family, addr=addr)
+ attribute5_value = sai_thrift_attribute_value_t(ipaddr=dst_ip_addr)
+ attribute5 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_DST_IP_ADDRESS,
+ value=attribute5_value)
+ mirror_attr_list.append(attribute5)
+
+ #source mac
+ attribute6_value = sai_thrift_attribute_value_t(mac=src_mac)
+ attribute6 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_SRC_MAC_ADDRESS,
+ value=attribute6_value)
+ mirror_attr_list.append(attribute6)
+
+ #dst mac
+ attribute7_value = sai_thrift_attribute_value_t(mac=dst_mac)
+ attribute7 = sai_thrift_attribute_t(id=SAI_MIRROR_SESSION_ATTR_DST_MAC_ADDRESS,
+ value=attribute7_value)
+ mirror_attr_list.append(attribute7)
+
+ mirror_id = client.sai_thrift_create_mirror_session(mirror_attr_list)
+ return mirror_id
+
+def sai_thrift_create_scheduler_profile(client, max_rate, algorithm=0):
+ scheduler_attr_list = []
+ attribute_value = sai_thrift_attribute_value_t(u64=max_rate)
+ attribute = sai_thrift_attribute_t(id=SAI_SCHEDULER_ATTR_MAX_BANDWIDTH_RATE ,
+ value=attribute_value)
+ scheduler_attr_list.append(attribute)
+ attribute_value = sai_thrift_attribute_value_t(s32=algorithm)
+ attribute = sai_thrift_attribute_t(id=SAI_SCHEDULER_ATTR_SCHEDULING_ALGORITHM ,
+ value=attribute_value)
+ scheduler_attr_list.append(attribute)
+ scheduler_profile_id = client.sai_thrift_create_scheduler_profile(scheduler_attr_list)
+ return scheduler_profile_id
+
+def sai_thrift_create_buffer_profile(client, pool_id, size, threshold, xoff_th, xon_th):
+ buffer_attr_list = []
+ attribute_value = sai_thrift_attribute_value_t(oid=pool_id)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_PROFILE_ATTR_POOL_ID ,
+ value=attribute_value)
+ buffer_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(u32=size)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_PROFILE_ATTR_BUFFER_SIZE ,
+ value=attribute_value)
+ buffer_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(u8=threshold)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_PROFILE_ATTR_SHARED_DYNAMIC_TH ,
+ value=attribute_value)
+ buffer_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(u32=xoff_th)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_PROFILE_ATTR_XOFF_TH ,
+ value=attribute_value)
+ buffer_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(u32=xon_th)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_PROFILE_ATTR_XON_TH ,
+ value=attribute_value)
+ buffer_attr_list.append(attribute)
+
+ buffer_profile_id = client.sai_thrift_create_buffer_profile(buffer_attr_list)
+ return buffer_profile_id
+
+def sai_thrift_create_pool_profile(client, pool_type, size, threshold_mode):
+ pool_attr_list = []
+ attribute_value = sai_thrift_attribute_value_t(s32=pool_type)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_POOL_ATTR_TYPE ,
+ value=attribute_value)
+ pool_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(u32=size)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_POOL_ATTR_SIZE ,
+ value=attribute_value)
+ pool_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(s32=threshold_mode)
+ attribute = sai_thrift_attribute_t(id=SAI_BUFFER_POOL_ATTR_TH_MODE ,
+ value=attribute_value)
+ pool_attr_list.append(attribute)
+ pool_id = client.sai_thrift_create_pool_profile(pool_attr_list)
+ return pool_id
+
+def sai_thrift_clear_all_counters(client):
+ for port in sai_port_list:
+ queue_list=[]
+ client.sai_thrift_clear_port_all_stats(port)
+ port_attr_list = client.sai_thrift_get_port_attribute(port)
+ attr_list = port_attr_list.attr_list
+ for attribute in attr_list:
+ if attribute.id == SAI_PORT_ATTR_QOS_QUEUE_LIST:
+ for queue_id in attribute.value.objlist.object_id_list:
+ queue_list.append(queue_id)
+
+ cnt_ids=[]
+ cnt_ids.append(SAI_QUEUE_STAT_PACKETS)
+ for queue in queue_list:
+ client.sai_thrift_clear_queue_stats(queue,cnt_ids,len(cnt_ids))
+
+def sai_thrift_read_port_counters(client,port):
+    """Read a fixed set of port counters plus per-queue packet counters.
+
+    Returns a tuple (counters_results, queue_counters_results):
+    - counters_results: values for OUT_DISCARDS, DROP_EVENTS, PFC 0-7 TX,
+      OUT_OCTETS and OUT_UCAST_PKTS, in that order.
+    - queue_counters_results: SAI_QUEUE_STAT_PACKETS for the first 8 queues
+      of the port (indices 0-7); any further queues are ignored.
+    """
+    port_cnt_ids=[]
+    port_cnt_ids.append(SAI_PORT_STAT_IF_OUT_DISCARDS)
+    port_cnt_ids.append(SAI_PORT_STAT_ETHER_STATS_DROP_EVENTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_0_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_1_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_2_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_3_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_4_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_5_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_6_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_PFC_7_TX_PKTS)
+    port_cnt_ids.append(SAI_PORT_STAT_IF_OUT_OCTETS)
+    port_cnt_ids.append(SAI_PORT_STAT_IF_OUT_UCAST_PKTS)
+    counters_results=[]
+    counters_results = client.sai_thrift_get_port_stats(port,port_cnt_ids,len(port_cnt_ids))
+    # collect the port's queue object ids
+    queue_list=[]
+    port_attr_list = client.sai_thrift_get_port_attribute(port)
+    attr_list = port_attr_list.attr_list
+    for attribute in attr_list:
+        if attribute.id == SAI_PORT_ATTR_QOS_QUEUE_LIST:
+            for queue_id in attribute.value.objlist.object_id_list:
+                queue_list.append(queue_id)
+    cnt_ids=[]
+    thrift_results=[]
+    queue_counters_results=[]
+    cnt_ids.append(SAI_QUEUE_STAT_PACKETS)
+    # queue1 counts how many queues have been read; only the first 8 are kept
+    queue1=0
+    for queue in queue_list:
+        if queue1 <= 7:
+            thrift_results=client.sai_thrift_get_queue_stats(queue,cnt_ids,len(cnt_ids))
+            queue_counters_results.append(thrift_results[0])
+            queue1+=1
+    return (counters_results, queue_counters_results)
+
+def sai_thrift_create_vlan_member(client, vlan_id, port_id, tagging_mode):
+ vlan_member_attr_list = []
+ attribute_value = sai_thrift_attribute_value_t(s32=vlan_id)
+ attribute = sai_thrift_attribute_t(id=SAI_VLAN_MEMBER_ATTR_VLAN_ID,
+ value=attribute_value)
+ vlan_member_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(oid=port_id)
+ attribute = sai_thrift_attribute_t(id=SAI_VLAN_MEMBER_ATTR_PORT_ID,
+ value=attribute_value)
+ vlan_member_attr_list.append(attribute)
+
+ attribute_value = sai_thrift_attribute_value_t(s32=tagging_mode)
+ attribute = sai_thrift_attribute_t(id=SAI_VLAN_MEMBER_ATTR_TAGGING_MODE,
+ value=attribute_value)
+ vlan_member_attr_list.append(attribute)
+ vlan_member_id = client.sai_thrift_create_vlan_member(vlan_member_attr_list)
+ return vlan_member_id
+
+def sai_thrift_set_port_shaper(client, port_id, max_rate):
+ sched_prof_id=sai_thrift_create_scheduler_profile(client, max_rate)
+ attr_value = sai_thrift_attribute_value_t(oid=sched_prof_id)
+ attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_QOS_SCHEDULER_PROFILE_ID, value=attr_value)
+ client.sai_thrift_set_port_attribute(port_id,attr)
diff --git a/ansible/roles/test/handlers/main.yml b/ansible/roles/test/handlers/main.yml
new file mode 100644
index 00000000000..92af17dacaa
--- /dev/null
+++ b/ansible/roles/test/handlers/main.yml
@@ -0,0 +1,62 @@
+---
+# Handlers for acs
+# Every handler escalates with become: true; most restart a named service
+# when notified, the rest run a one-off shell command.
+
+- name: Restart SNMP Daemon
+  become: true
+  service: name=snmp
+           state=restarted
+
+- name: Restart NTP Daemon
+  become: true
+  service: name=ntp
+           state=restarted
+
+- name: Restart Syslog Daemon
+  become: true
+  service: name=rsyslog
+           state=restarted
+
+- name: Restart LLDP Daemon
+  become: true
+  service: name=lldp
+           state=restarted
+
+- name: Restart Quagga Daemon
+  become: true
+  service: name=bgp
+           state=restarted
+
+- name: Restart Platform Monitor Container Service
+  become: true
+  service: name=platform-monitor
+           state=restarted
+
+- name: Update Grub
+  become: true
+  shell: /usr/sbin/update-grub
+
+# Bounce an interface; the sleep gives it time to settle while the trailing
+# exit preserves the ifup/ifdown return code for the handler result.
+- name: Restart interface
+  become: true
+  shell: ifdown {{ restart_interface }} && ifup {{ restart_interface }}; rc=$?; sleep 3; exit $rc
+
+- name: Restart smartd
+  become: true
+  service:
+    name=smartd
+    state=restarted
+
+- name: Restart vasd
+  become: true
+  service:
+    name=vas
+    state=restarted
+
+- name: Restart anything-sync-daemon
+  become: true
+  service:
+    name=asd
+    state=restarted
+
+- name: Clean up apt
+  become: true
+  shell: apt-get autoremove -y; apt-get autoclean -y; apt-get clean -y
diff --git a/ansible/roles/test/tasks/arpall.yml b/ansible/roles/test/tasks/arpall.yml
new file mode 100644
index 00000000000..3bcd65fb0d4
--- /dev/null
+++ b/ansible/roles/test/tasks/arpall.yml
@@ -0,0 +1,143 @@
+##### This playbook tests Kernel ARP/GARP request and reply work as designed
+- name: collect ACS current configuration info(to find macaddress)
+ setup:
+
+- name: change ACS DUT interface IP to test IP address
+ command: /sbin/ifconfig Ethernet4 10.10.1.2 netmask 255.255.255.240
+ become: yes
+
+- name: change ACS DUT interface IP to test IP address
+ command: /sbin/ifconfig Ethernet8 10.10.1.20 netmask 255.255.255.240
+ become: yes
+
+- name: copy acsbase files
+ copy: src=roles/test/files/acstests
+ dest=/root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Clear DUT arp cache
+ command: ip nei flush all
+ become: yes
+
+# Send correct ARP request from correct interface, expecting normal behavior
+- name: Send correct arp packets (10.10.1.3 to 10.10.1.2 with src_mac=00:06:07:08:09:0a)
+ command: ptf --test-dir acstests arptest.ExpectReply --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable1
+
+- name: Check ACS ARP table and confirm macaddress and interface are correct
+ assert:
+ that:
+ - "{{ arptable1['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:0a' }}"
+ - "{{ arptable1['arptable']['v4']['10.10.1.3']['interface'] == 'Ethernet4' }}"
+
+## check DUT won't reply ARP and install ARP entry when ARP request coming from other interfaces
+- name: Clear DUT arp cache
+ command: ip nei flush all
+ become: yes
+
+- name: Send correct arp packets from other interface expect no reply(10.10.1.4 to 10.10.1.2 with src_mac=00:02:07:08:09:0a)
+ command: ptf --test-dir acstests arptest.WrongIntNoReply --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable2
+
+- name: Check ARP request coming in from other interface should not be installed in DUT ARP table, should be dropped
+ assert:
+ that:
+ - "'{{ item.key }}|string' != '10.10.1.4'"
+ with_dict: arptable2.arptable.v4
+
+## check DUT won't reply ARP and install ARP entry when src address is not in interface subnet range
+- name: Clear DUT arp cache
+ command: ip nei flush all
+ become: yes
+
+- name: Send Src IP out of interface subnet range arp packets, expect no reply and no arp table entry (10.10.1.22 to 10.10.1.2 with src_mac=00:03:07:08:09:0a)
+ command: ptf --test-dir acstests arptest.SrcOutRangeNoReply --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable3
+
+- name: Check ARP request from out of range address will not be installed in DUT ARP table, should be dropped
+ assert:
+ that:
+ - "'{{ item.key }}|string' != '10.10.1.22'"
+ with_dict: arptable3.arptable.v4
+
+## Test Gratuitous ARP behavior, no Gratuitous ARP installed when arp was not resolved before
+- name: Clear DUT arp cache
+ command: ip nei flush all
+ become: yes
+
+- name: Send garp packets (10.10.1.7 to 10.10.1.7)
+ command: ptf --test-dir acstests arptest.GarpNoUpdate --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable5
+
+- name: Check this GARP request will not be installed in DUT ARP table, should be ignored
+ assert:
+ that:
+ - "'{{ item.key }}|string' != '10.10.1.7'"
+ with_dict: arptable5.arptable.v4
+
+# Test Gratuitous ARP update case, when received garp, no arp reply, update arp table if it was solved before
+- name: Send correct arp packets (10.10.1.3 to 10.10.1.2 with src_mac=00:06:07:08:09:0a)
+ command: ptf --test-dir acstests arptest.ExpectReply --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable6
+
+- name: Check ACS ARP table and confirm macaddress and interface are correct
+ assert:
+ that:
+ - "{{ arptable6['arptable']['v4']['10.10.1.3']['macaddress'] == '00:06:07:08:09:0a' }}"
+ - "{{ arptable6['arptable']['v4']['10.10.1.3']['interface'] == 'Ethernet4' }}"
+
+- pause: seconds=2
+
+- name: Send garp packets to update arp table(10.10.1.3 to 10.10.1.3 with src_mac=00:00:07:08:09:0a)
+ command: ptf --test-dir acstests arptest.GarpUpdate --platform remote -t "acs_mac='{{ ansible_Ethernet4['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get DUT arp table
+ switch_arptable:
+ register: arptable7
+
+- name: Check ACS ARP table and confirm macaddress and interface are updated correctly by garp
+ assert:
+ that:
+ - "{{ arptable7['arptable']['v4']['10.10.1.3']['macaddress'] == '00:00:07:08:09:0a' }}"
+ - "{{ arptable7['arptable']['v4']['10.10.1.3']['interface'] == 'Ethernet4' }}"
+
+# Recover DUT interface IP Address before entering this test case
+- name: Recover DUT IP address based on minigraph
+ command: /sbin/ifconfig {{item.name}} {{ item.addr }} netmask {{ item.mask }}
+ become: yes
+ with_items: minigraph_interfaces
+ when: item.name == 'Ethernet4' or item.name == 'Ethernet8'
+
diff --git a/ansible/roles/test/tasks/bgp_entry_flap.yml b/ansible/roles/test/tasks/bgp_entry_flap.yml
new file mode 100644
index 00000000000..4753cbe4e19
--- /dev/null
+++ b/ansible/roles/test/tasks/bgp_entry_flap.yml
@@ -0,0 +1,92 @@
+- debug: msg="testing bgp neighbor {{ item }}"
+
+- set_fact:
+ addr: "{{ item['addr'] }}"
+ asn: "{{ item['asn'] }}"
+ name: "{{ item['name'] }}"
+ peer_addr: "{{ item['peer_addr'] }}"
+
+- set_fact:
+ hwsku: "{{ acs_devices[name]['hwsku'] }}"
+ cred: "{{ switch_login[acs_devices[name]['hwsku']] }}"
+
+- name: Get ASIC tables
+ switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes
+ become: yes
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i sswsyncd python
+
+- block:
+ - name: Assert the particular entry is in nexthopgroup table
+ assert:
+ that: nexthop[addr] in nexthopgroup[item]
+ with_items: "{{ nexthopgroup }}"
+
+ - name: Gathering minigraph facts about neighbor
+ minigraph_facts: host={{ name }} filename="{{ vmhost_num }}-{{ name }}.xml"
+ connection: local
+ become: no
+
+ - name: Shut down BGP session from neighbor
+ action: cisco template=bgp_neighbor_shut.j2
+ args:
+ host: "{{ minigraph_mgmt_interface.addr }}"
+ login: "{{ cred }}"
+ connection: cisco
+
+ - name: Pause for ASIC reprogramming
+ pause: seconds=10
+
+ - name: Debug Current Peer Info
+ debug: msg="Current Peer = {{ item }}"
+
+ - name: Update list of current nexthop group(s)
+ switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes
+ become: yes
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i sswsyncd python
+
+ - name: Poll for updated tables until peer is not in nexthop groups
+ switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes
+ become: yes
+ register: table_result
+ until: "{{ ( table_result.nexthop[addr] not in nexthopgroup[item] ) or ( table_result.nexthopgroup[item] is undefined ) }}"
+ retries: 6
+ delay: 10
+ with_items: "{{ nexthopgroup }}"
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i sswsyncd python
+
+ - name: Restart BGP session from neighbor
+ action: cisco template=bgp_neighbor_noshut.j2
+ args:
+ host: "{{ minigraph_mgmt_interface.addr }}"
+ login: "{{ cred }}"
+ connection: cisco
+
+ - name: Pause for ASIC reprogramming
+ pause: seconds=10
+
+ - name: Update list of current nexthop group(s)
+ switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes
+ become: yes
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i sswsyncd python
+
+ - name: Poll for updated tables until peer is in nexthop groups
+ switch_tables: asic="{{asic}}" nexthop=yes nexthopgroup=yes
+ become: yes
+ register: table_result
+ until: "{{ ( table_result.nexthopgroup[item] is defined ) and ( table_result.nexthop[addr] in table_result.nexthopgroup[item] ) }}"
+ retries: 6
+ delay: 10
+ with_items: "{{ nexthopgroup }}"
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i sswsyncd python
+
+ when: "'T2' in name"
diff --git a/ansible/roles/test/tasks/bgp_flap.yml b/ansible/roles/test/tasks/bgp_flap.yml
new file mode 100644
index 00000000000..192886cb298
--- /dev/null
+++ b/ansible/roles/test/tasks/bgp_flap.yml
@@ -0,0 +1,26 @@
+# Shuotian Cheng
+#
+# This test is part of sswsyncd functionality tests.
+#
+# In this test, I try to iteratively shutdown and bring up BGP session from
+# the DUT spine neighbor side and test after a certain time period that the
+# nexthopgroup table is updated according to the certain BGP session flappings.
+- set_fact:
+ acs_devices: "{{ minigraph_devices }}"
+ bgp_neighbors: "{{ minigraph_bgp }}"
+
+- debug: var=sonic_asic_type
+
+- set_fact: asic="{{ sonic_asic_type }}"
+
+- include: bgp_nei_up.yml
+ with_items: bgp_neighbors
+ when: "'T2' in item['name']"
+
+- include: bgp_entry_flap.yml
+ with_items: bgp_neighbors
+
+- name: recover minigraph facts about the device(above steps loaded neighbors configuration)
+ minigraph_facts: host="{{ inventory_hostname }}"
+ connection: local
+ become: no
diff --git a/ansible/roles/test/tasks/bgp_nei_up.yml b/ansible/roles/test/tasks/bgp_nei_up.yml
new file mode 100644
index 00000000000..2fe6651c1cb
--- /dev/null
+++ b/ansible/roles/test/tasks/bgp_nei_up.yml
@@ -0,0 +1,20 @@
+- set_fact:
+ asn: "{{ item['asn'] }}"
+ name: "{{ item['name'] }}"
+ peer_addr: "{{ item['peer_addr'] }}"
+
+- set_fact:
+ hwsku: "{{ acs_devices[name]['hwsku'] }}"
+ cred: "{{ switch_login[acs_devices[name]['hwsku']] }}"
+
+- name: Gathering minigraph facts about neighbor
+ minigraph_facts: host={{ name }} filename="{{ vmhost_num }}-{{ name }}.xml"
+ connection: local
+ become: no
+
+- name: Configure BGP session of neighbor router to up
+ action: cisco template=bgp_neighbor_noshut.j2
+ args:
+ host: "{{ minigraph_mgmt_interface.addr }}"
+ login: "{{ cred }}"
+ connection: cisco
diff --git a/ansible/roles/test/tasks/copp.yml b/ansible/roles/test/tasks/copp.yml
new file mode 100644
index 00000000000..50294b305ff
--- /dev/null
+++ b/ansible/roles/test/tasks/copp.yml
@@ -0,0 +1,39 @@
+- block:
+ - fail: msg="Please set ptf_host variable"
+ when: ptf_host is not defined
+
+ - name: Disable Mellanox copp rate limiting
+ script: roles/test/files/mlnx/disable_copp_rate_limiting.sh
+ when: minigraph_hwsku is defined and minigraph_hwsku == 'ACS-MSN2700'
+
+ - name: Remove existing ip from ptf host
+ script: roles/test/files/helpers/remove_ip.sh
+ delegate_to: "{{ ptf_host }}"
+
+ - name: Install test ip to ptf host
+ script: roles/test/files/helpers/add_ip.sh
+ delegate_to: "{{ ptf_host }}"
+
+ - name: copy the test to ptf container
+ copy: src=roles/test/files/saitests dest=/root
+ delegate_to: "{{ ptf_host }}"
+
+ - include: copp_ptf.yml
+ vars:
+ test_name: COPP test - {{ item }}
+ test_path: copp_tests.{{ item }}
+ test_params: ""
+ with_items:
+ - ARPTest
+ - DHCPTest
+ - LLDPTest
+ - BGPTest
+ - LACPTest
+ - SNMPTest
+ - SSHTest
+ - IP2METest
+
+ always:
+ - name: Remove existing ip from ptf host
+ script: roles/test/files/helpers/remove_ip.sh
+ delegate_to: "{{ ptf_host }}"
diff --git a/ansible/roles/test/tasks/copp_ptf.yml b/ansible/roles/test/tasks/copp_ptf.yml
new file mode 100644
index 00000000000..60509e33f0c
--- /dev/null
+++ b/ansible/roles/test/tasks/copp_ptf.yml
@@ -0,0 +1,14 @@
+# FIXME: Use one common ptf_run.yml. Merge this file with sai_ptf.yml
+- name: "{{ test_name }}"
+ shell: ptf --test-dir saitests {{ test_path }} --qlen=10000 --platform nn -t "verbose=True;dst_mac='{{ ansible_Ethernet0['macaddress'] }}';" --device-socket 0-3@tcp://127.0.0.1:10900 --device-socket 1-3@tcp://{{ ansible_eth0['ipv4']['address'] }}:10900 --disable-ipv6 --disable-vxlan --disable-geneve --disable-erspan --disable-mpls --disable-nvgre 2>&1
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+ failed_when: False
+ register: out
+
+- debug: var=out.stdout_lines
+ when: out.rc != 0
+
+- fail: msg="Failed test '{{ test_name }}'"
+ when: out.rc != 0
diff --git a/ansible/roles/test/tasks/dscp_mapping.yml b/ansible/roles/test/tasks/dscp_mapping.yml
new file mode 100644
index 00000000000..dfce9d593b2
--- /dev/null
+++ b/ansible/roles/test/tasks/dscp_mapping.yml
@@ -0,0 +1,30 @@
+- name: copy dscp_mapping.py
+ copy: src=roles/test/files/acstests/dscp_mapping.py
+ dest=/root/acstests/dscp_mapping.py
+ delegate_to: "{{ ptf_host }}"
+
+- name: Send Arp packets to populate the ARP table in ACS
+ command: ptf --test-dir acstests --platform remote dscp_mapping.ArpPopulate -t "router_mac='{{ ansible_Ethernet0['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Clear switch counters
+ clear_switch_counters:
+
+- name: Send dscp packets
+ command: ptf --test-dir acstests --platform remote dscp_mapping.DscpMappingTest -t "router_mac='{{ ansible_Ethernet0['macaddress'] }}'"
+ args:
+ chdir: /root
+ delegate_to: "{{ ptf_host }}"
+
+- name: Get switch counters
+ switch_counters:
+
+- name: Check switch counters
+ assert:
+ that:
+ - "{{ switch_counters['Ethernet4']['0']['ucq']['pkt'] == 61 }}"
+ - "{{ switch_counters['Ethernet4']['1']['ucq']['pkt'] == 1 }}"
+ - "{{ switch_counters['Ethernet4']['3']['ucq']['pkt'] == 1 }}"
+ - "{{ switch_counters['Ethernet4']['4']['ucq']['pkt'] == 1 }}"
diff --git a/ansible/roles/test/tasks/ecmp.yml b/ansible/roles/test/tasks/ecmp.yml
new file mode 100644
index 00000000000..654e47de22b
--- /dev/null
+++ b/ansible/roles/test/tasks/ecmp.yml
@@ -0,0 +1,33 @@
+- block:
+ - fail: msg="Please set ptf_host variable"
+ when: ptf_host is not defined
+
+ - name: Remove existing ip from ptf host
+ script: roles/test/files/helpers/remove_ip.sh
+ delegate_to: "{{ ptf_host }}"
+
+ - name: Install test ip to ptf host
+ script: roles/test/files/helpers/add_ip.sh
+ delegate_to: "{{ ptf_host }}"
+
+ - name: copy the test to ptf container
+ copy: src=roles/test/files/saitests dest=/root
+ delegate_to: "{{ ptf_host }}"
+
+ - name: Install routes on the switch
+ script: roles/test/files/helpers/add_routes.sh
+
+ - include: qos_sai_ptf.yml
+ vars:
+ test_name: ECMP test
+ test_path: ecmp_test.ECMPtest
+ test_params: ""
+ extra_options: "--relax --log-dir /tmp/"
+
+ always:
+ - name: Remove routes from the switch
+ script: roles/test/files/helpers/remove_routes.sh
+
+ - name: Remove existing ip from ptf host
+ script: roles/test/files/helpers/remove_ip.sh
+ delegate_to: "{{ ptf_host }}"
diff --git a/ansible/roles/test/tasks/interface_up_down.yml b/ansible/roles/test/tasks/interface_up_down.yml
new file mode 100644
index 00000000000..801380cd185
--- /dev/null
+++ b/ansible/roles/test/tasks/interface_up_down.yml
@@ -0,0 +1,101 @@
+# This playbook tests neighbor interface flap and ACS interface status work properly
+#
+- name: Gathering minigraph facts about the device
+ minigraph_facts: host={{ inventory_hostname }}
+ connection: local
+ become: no
+
+- name: Ensure LLDP Daemon started and Enabled
+ become: true
+ service: name=lldpd
+ state=started
+ enabled=yes
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i lldp python
+
+- name: Gather information from lldp
+ lldp:
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i lldp python
+
+- name: If underlay exists, use underlay to replace it.
+ set_fact:
+ minigraph_neighbors: "{{ minigraph_underlay_neighbors }}"
+ minigraph_devices: "{{ minigraph_underlay_devices }}"
+ when: minigraph_underlay_neighbors is defined
+
+- name: no shutdown neighbor interfaces if it was down based on the graph file
+ action: cisco template=neighbor_interface_no_shut_single.j2
+ args:
+ host: "{{ minigraph_devices[minigraph_neighbors[item]['name']]['mgmt_addr'] }}"
+ login: "{{ switch_login[minigraph_devices[minigraph_neighbors[item]['name']]['hwsku']] }}"
+ skip_default_user: "yes"
+ connection: cisco
+ with_items: "{{ minigraph_neighbors.keys() | difference(lldp.keys()) }}"
+
+- name: sleep for some time
+ pause: seconds=5
+
+- name: gather interface facts
+ setup:
+
+- name: verify all local interfaces are up
+ assert: { that: "ansible_{{ item }}['active'] == true" }
+ with_items: ansible_interfaces
+ when:
+ - item | match("Ethernet.*")
+
+- name: Gather information from lldp again after ports are enabled
+ lldp:
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i lldp python
+
+- name: rearrange lldp received data structure for interface up down test
+ interface_up_down_data_struct_facts: data="{{ lldp }}"
+ connection: local
+ become: no
+
+- name: shutdown neighbor interfaces
+ action: cisco template=neighbor_interface_shut.j2
+ args:
+ host: "{{ item }}"
+ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}"
+ skip_default_user: "yes"
+ connection: cisco
+ with_items: ansible_interface_up_down_data_struct_facts.keys()
+
+- name: sleep for some time
+ pause: seconds=5
+
+- name: gather interface facts
+ setup:
+
+- name: verify all local interfaces are down
+ assert: { that: "ansible_{{ item }}['active'] == false" }
+ with_items: ansible_interfaces
+ when:
+ - item | match("Ethernet.*")
+
+- name: no shutdown neighbor interfaces
+ action: cisco template=neighbor_interface_no_shut.j2
+ args:
+ host: "{{ item }}"
+ login: "{{ switch_login[ansible_interface_up_down_data_struct_facts[item]['nei_device_type']] }}"
+ skip_default_user: "yes"
+ connection: cisco
+ with_items: ansible_interface_up_down_data_struct_facts.keys()
+
+- name: sleep for some time
+ pause: seconds=5
+
+- name: gather interface facts
+ setup:
+
+- name: verify all local interfaces are up
+ assert: { that: "ansible_{{ item }}['active'] == true" }
+ with_items: ansible_interfaces
+ when:
+ - item | match("Ethernet.*")
diff --git a/ansible/roles/test/tasks/link_entry_flap.yml b/ansible/roles/test/tasks/link_entry_flap.yml
new file mode 100644
index 00000000000..c2d2424b440
--- /dev/null
+++ b/ansible/roles/test/tasks/link_entry_flap.yml
@@ -0,0 +1,54 @@
+- set_fact:
+ addr: "{{ item['addr'] }}"
+ name: "{{ item['name'] }}"
+ peer_addr: "{{ item['peer_addr'] }}"
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes
+ become: yes
+
+- name: Assert the particular entry is in nexthop table
+ assert:
+ that:
+ - peer_addr in nexthop
+
+- set_fact:
+ interface: "{{ minigraph_underlay_neighbors[name]['port'] }}"
+
+- name: Shut down link from fanout switch
+ action: cisco template=port_shut.j2 enable=no
+ args:
+ host: "{{ minigraph_underlay_devices[minigraph_underlay_neighbors[name]['name']]['mgmt_addr'] }}"
+ login: "{{ switch_login[minigraph_underlay_devices[minigraph_underlay_neighbors[name]['name']]['hwsku']] }}"
+ connection: cisco
+
+- name: Pause for 60 seconds
+ pause: seconds=60
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes
+ become: yes
+
+- name: Assert the particular entry is not in nexthop table
+ assert:
+ that:
+ - peer_addr not in nexthop
+
+- name: Bring up link from fanout switch
+ action: cisco template=port_noshut.j2 enable=no
+ args:
+ host: "{{ minigraph_underlay_devices[minigraph_underlay_neighbors[name]['name']]['mgmt_addr'] }}"
+ login: "{{ switch_login[minigraph_underlay_devices[minigraph_underlay_neighbors[name]['name']]['hwsku']] }}"
+ connection: cisco
+
+- name: Pause for 60 seconds
+ pause: seconds=60
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes
+ become: yes
+
+- name: Assert the particular entry is in nexthop table
+ assert:
+ that:
+ - peer_addr in nexthop
diff --git a/ansible/roles/test/tasks/link_flap.yml b/ansible/roles/test/tasks/link_flap.yml
new file mode 100644
index 00000000000..808e1ca3e6b
--- /dev/null
+++ b/ansible/roles/test/tasks/link_flap.yml
@@ -0,0 +1,15 @@
+# Shuotian Cheng
+#
+# This test is part of sswsyncd functionality tests.
+#
+# In this test, I try to iteratively shutdown and bring up physical interface
+# from the DUT spine neighbor side by controlling the underlay fan-out switch.
+# Then, I check the neighbor table to ensure certain entries are updated
+# according to the certain link flappings.
+
+- name: Gathering minigraph facts about the device
+ minigraph_facts: host={{ inventory_hostname }}
+ connection: local
+
+- include: link_entry_flap.yml
+ with_items: "{{ minigraph_interfaces }}"
diff --git a/ansible/roles/test/tasks/lldp.yml b/ansible/roles/test/tasks/lldp.yml
new file mode 100644
index 00000000000..17199bb358c
--- /dev/null
+++ b/ansible/roles/test/tasks/lldp.yml
@@ -0,0 +1,41 @@
+# Gather minigraph facts
+- name: Gathering minigraph facts about the device
+ minigraph_facts: host={{ inventory_hostname }}
+ become: no
+ connection: local
+
+- name: Print neighbors in minigraph
+ debug: msg="{{ minigraph_neighbors }}"
+
+- name: Print underlay neighbors in minigraph
+ debug: msg="{{ minigraph_underlay_neighbors }}"
+
+- name: Gather information from lldp
+ lldp:
+ vars:
+ ansible_shell_type: docker
+ ansible_python_interpreter: docker exec -i lldp python
+
+- name: If underlay exists, use it.
+ set_fact:
+ minigraph_neighbors: "{{ minigraph_underlay_neighbors }}"
+ when: minigraph_underlay_neighbors is defined
+
+- name: Compare the lldp neighbors name with minigraph neighbors name (exclude the management port)
+ assert: { that: "'{{ lldp[item]['chassis']['name'] }}' == '{{ minigraph_neighbors[item]['name'] }}'" }
+ with_items: lldp.keys()
+ when: item != "eth0"
+ ignore_errors: yes
+ #todo - samirja and johnar - need to implement a minigraph topology that matches the fanout otherwise it errors out
+
+- name: Compare the lldp neighbors interface with minigraph neighbor interface (exclude the management port)
+ assert: { that: "'{{ lldp[item]['port']['ifname'] }}' == '{{ minigraph_neighbors[item]['port'] }}'" }
+ with_items: lldp.keys()
+ when: item != "eth0"
+ ignore_errors: yes
+ #todo - samirja and johnar - need to implement a minigraph topology that matches the fanout otherwise it errors out
+
+- name: add host
+ add_host: name={{ lldp[item]['chassis']['mgmt-ip'] }} groups=lldp_neighbors neighbor_interface={{lldp[item]['port']['ifname']}} dut_interface={{item}} hname={{lldp[item]['chassis']['mgmt-ip'] }}
+ with_items: lldp.keys()
+ when: lldp[item]['chassis']['mgmt-ip'] is defined
diff --git a/ansible/roles/test/tasks/lldp_neighbor.yml b/ansible/roles/test/tasks/lldp_neighbor.yml
new file mode 100644
index 00000000000..99ad4a56888
--- /dev/null
+++ b/ansible/roles/test/tasks/lldp_neighbor.yml
@@ -0,0 +1,15 @@
+- name: Gather LLDP information from all neighbors by performing a SNMP walk
+ lldp_facts: host={{ hname }} version=v2c community={{ snmp_rocommunity }}
+ connection: local
+
+- name: verify the dut system name field is not empty
+ assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_sys_name'] }}' != ''"}
+
+- name: verify the dut chassis id field is not empty
+ assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_chassis_id'] }}' != ''"}
+
+- name: verify the dut system description field is not empty
+ assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_sys_desc'] }}' != ''"}
+
+- name: verify the dut port id field is published correctly
+ assert: {that: "'{{ ansible_lldp_facts[neighbor_interface]['neighbor_port_id'] }}' == dut_interface"}
diff --git a/ansible/roles/test/tasks/mac_entry_update.yml b/ansible/roles/test/tasks/mac_entry_update.yml
new file mode 100644
index 00000000000..5900fcb9b26
--- /dev/null
+++ b/ansible/roles/test/tasks/mac_entry_update.yml
@@ -0,0 +1,59 @@
+- set_fact:
+ addr: "{{ item['addr'] }}"
+ name: "{{ item['name'] }}"
+ peer_addr: "{{ item['peer_addr'] }}"
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes neighbor=yes
+ become: yes
+
+- set_fact:
+ prev_mac_addr: "{{ neighbor[peer_addr] }}"
+ new_mac_addr: "aa:bb:cc:dd:ee:ff"
+ interface: "et1"
+
+- name: Assert the previous MAC entry is in neighbor table
+ assert:
+ that:
+ - prev_mac_addr == neighbor[peer_addr]
+
+- name: Update neighbor MAC address
+ action: cisco template=mac_neighbor_update.j2 root=yes
+ args:
+ host: "{{ minigraph_devices[minigraph_neighbors[name]['name']]['mgmt_addr'] }}"
+ login: "{{ switch_login[minigraph_devices[minigraph_neighbors[name]['name']]['hwsku']] }}"
+ connection: cisco
+
+- name: Pause for 30 seconds
+ pause: seconds=30
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes neighbor=yes
+ become: yes
+
+- name: Assert the new MAC entry is in neighbor table
+ assert:
+ that:
+ - new_mac_addr == neighbor[peer_addr]
+
+- set_fact:
+ new_mac_addr: "{{ prev_mac_addr }}"
+
+- name: Revert previous neighbor MAC address
+ action: cisco template=mac_neighbor_update.j2 root=yes
+ args:
+ host: "{{ minigraph_devices[minigraph_neighbors[name]['name']]['mgmt_addr'] }}"
+ login: "{{ switch_login[minigraph_devices[minigraph_neighbors[name]['name']]['hwsku']] }}"
+ connection: cisco
+
+- name: Pause for 30 seconds
+ pause: seconds=30
+
+- name: Get Broadcom ASIC tables
+ switch_tables: asic='broadcom' nexthop=yes neighbor=yes
+ become: yes
+
+- name: Assert the previous MAC entry is in neighbor table
+ assert:
+ that:
+ - prev_mac_addr == neighbor[peer_addr]
diff --git a/ansible/roles/test/tasks/mac_update.yml b/ansible/roles/test/tasks/mac_update.yml
new file mode 100644
index 00000000000..5fd37014f78
--- /dev/null
+++ b/ansible/roles/test/tasks/mac_update.yml
@@ -0,0 +1,6 @@
+- name: Gathering minigraph facts about the device
+ minigraph_facts: host={{ inventory_hostname }}
+ connection: local
+
+- include: mac_entry_update.yml
+ with_items: "{{ minigraph_interfaces }}"
diff --git a/ansible/roles/test/tasks/ntp.yml b/ansible/roles/test/tasks/ntp.yml
new file mode 100644
index 00000000000..841bfb5e914
--- /dev/null
+++ b/ansible/roles/test/tasks/ntp.yml
@@ -0,0 +1,9 @@
+- name: Check if NTP is synced
+ become: true
+ shell: ntpstat
+ register: ntpstat_result
+ until: ntpstat_result.rc == 0
+ retries: 5
+ delay: 2
+
+- debug: msg="NTP Status {{ ntpstat_result.stdout }}"
diff --git a/ansible/roles/test/tasks/snmp.yml b/ansible/roles/test/tasks/snmp.yml
new file mode 100644
index 00000000000..5bbe7df591a
--- /dev/null
+++ b/ansible/roles/test/tasks/snmp.yml
@@ -0,0 +1,10 @@
+# Gather facts with SNMP version 2
+- name: Gathering basic snmp facts about the device
+ snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }}
+ connection: local
+
+# Test SNMP is working with sysdesc
+- name: Validating SNMP was successful and Hostname is what is expected
+ assert: { that: "ansible_sysname == '{{ inventory_hostname }}'" }
+
+
diff --git a/ansible/roles/test/tasks/snmp/cpu.yml b/ansible/roles/test/tasks/snmp/cpu.yml
new file mode 100644
index 00000000000..831cc42061a
--- /dev/null
+++ b/ansible/roles/test/tasks/snmp/cpu.yml
@@ -0,0 +1,59 @@
+# Test SNMP CPU Utilization
+#
+# - Pulls CPU usage via shell commands
+# - Polls SNMP for CPU usage
+# - Difference should be < 2% (allowing float->int rounding on each result)
+#
+#
+# Requires: Ansible v1.x+
+#
+# Usage:
+#
+# sudo ansible-playbook test.yml -i str --limit 10.3.147.142 --sudo --vault-password-file ~/password.txt --tags snmp_cpu
+#
+#
+# TODO: abstract the snmp OID by SKU
+
+- block:
+ - name: Start cpu load generation
+ shell: cpu_load() { yes > /dev/null & }; cpu_load && cpu_load && cpu_load && cpu_load
+ become: yes
+
+ - name: Wait for load to reflect in SNMP
+ pause: seconds=20
+
+ # Gather facts with SNMP version 2
+ - name: Gathering basic snmp facts about the device
+ snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }} is_dell=yes
+ connection: local
+
+ - name: Pull CPU utilization via shell
+ # Explanation: Run top command with 2 iterations, 5sec delay. Discard the first iteration, then grab the CPU line from the second,
+ # subtract 100% - idle, and round down to integer.
+ shell: top -bn2 -d5 | awk '/^top -/ { p=!p } { if (!p) print }' | awk '/Cpu/ { cpu = 100 - $8 };END { print cpu }' | awk '{printf "%.0f",$1}'
+ register: shell_cpu_usage
+ become: yes
+
+ # If no value exists, fail instead of doing string->int conversion and weird math
+ - name: 'Validate SNMP CPU Utilization exists and is valid'
+ assert:
+ that: '(ansible_ChStackUnitCpuUtil5sec is defined) and ( {{ansible_ChStackUnitCpuUtil5sec|isnan}} == False ) '
+
+ - name: CPU usage from SNMP
+ debug: var=ansible_ChStackUnitCpuUtil5sec
+
+ - name: CPU usage from TOP
+ debug: var=shell_cpu_usage.stdout
+
+ - name: Difference between SNMP and TOP
+ debug: msg="{{ ((ansible_ChStackUnitCpuUtil5sec) - (shell_cpu_usage.stdout|int)) | abs() }}"
+
+ # Compare results
+ - name: 'Validating SNMP CPU utilization matches shell "top" result'
+ assert:
+ that: "{{ ((ansible_ChStackUnitCpuUtil5sec) - (shell_cpu_usage.stdout|int)) | abs() }} <= 5"
+
+ always:
+ - name: Stop cpu load generation
+ shell: killall yes
+ become: yes
diff --git a/ansible/roles/test/tasks/snmp/interfaces.yml b/ansible/roles/test/tasks/snmp/interfaces.yml
new file mode 100644
index 00000000000..93f0a9e518e
--- /dev/null
+++ b/ansible/roles/test/tasks/snmp/interfaces.yml
@@ -0,0 +1,31 @@
+# Gather facts with SNMP version 2
+- name: Gathering basic snmp facts about the device
+ snmp_facts: host={{ ansible_host }} version=v2c community={{ snmp_rocommunity }}
+ connection: local
+
+- set_fact:
+ snmp_intf: []
+
+- set_fact:
+ mg_intf: []
+
+- name: Create snmp interfaces list
+ set_fact:
+ snmp_intf: "{{snmp_intf + [item.value.name] }}"
+ with_dict: snmp_interfaces
+ when: "{{item.value.name is defined}}"
+
+- name: Create minigraph interfaces list
+ set_fact:
+ mg_intf: "{{mg_intf + [item.name] }}"
+ with_items: minigraph_interfaces
+ when: "{{item.name is defined}}"
+
+- debug: var=snmp_intf
+- debug: var=mg_intf
+
+- name: Check for missing interfaces in SNMP
+ fail: msg="Minigraph interface {{item}} not in SNMP interfaces"
+ when: "{{item not in snmp_intf}}"
+ with_items: mg_intf
+
diff --git a/ansible/roles/test/tasks/syslog.yml b/ansible/roles/test/tasks/syslog.yml
new file mode 100644
index 00000000000..13fc97a6209
--- /dev/null
+++ b/ansible/roles/test/tasks/syslog.yml
@@ -0,0 +1,110 @@
+# Basic test of syslog functionality
+#
+# - Configures DUT to send syslogs to ansible host.
+# - Start Syslog Server on ansible host.
+# - SSH to device and generate a syslog
+# - Validate syslog was received
+# - Clean up DUT
+#
+# Requires: Ansible v2+
+#
+# Usage:
+#
+# sudo ansible-playbook test.yml -i str --limit 10.3.147.142 --sudo --vault-password-file ~/password.txt --tags syslog
+#
+#
+
+- debug: msg="Starting Syslog Tests..."
+
+# Fetch the source IP of the ansible server
+- name: Get localhost facts
+ setup:
+ connection: local
+ register: local_facts
+
+#- debug: var=local_facts.ansible_facts.ansible_eth0.ipv4.address
+
+
+# Set variables for the test
+- name: Set variables for the test
+ set_fact:
+ local_srcip: "{{ local_facts.ansible_facts.ansible_eth0.ipv4.address }}"
+ original_syslog_servers: "{{ syslog_servers }}"
+ syslog_port: "{{ 65535 | random(start=65000) }}"
+
+- debug: var=local_srcip
+
+# TODO: (johnar) rsyslog template needs to be changed to allow variable port. Static the config for now...
+# Reconfigure syslog on the DUT to point at this remote host
+# set_fact:
+# syslog_servers:
+# - "{{ local_srcip }}"
+
+#- name: Reconfigure Rsyslog for Testing
+# become: true
+# template: src=../../acs/templates/rsyslog.conf.j2
+# dest=/etc/rsyslog.conf
+
+- name: Add Rsyslog destination for testing
+ become: true
+ shell: 'echo "*.* @{{ local_srcip }}:{{ syslog_port }}" >> /etc/rsyslog.conf'
+
+- name: Restart Syslog Daemon
+ become: true
+ service: name=rsyslog state=restarted
+
+# Start Syslog Server
+
+- name: Start Syslog Server on ansible localhost
+ syslog_server: timeout=10 port="{{ syslog_port }}" host="{{ local_srcip }}"
+ async: 60
+ poll: 0
+ register: syslog_sleeper
+ connection: local
+ become: no
+
+- name: Wait a little bit for service to start
+ wait_for: timeout=2
+
+# SSH to device and generate a syslog
+- name: Send test syslog
+ become: yes
+ command: logger --priority INFO Basic Test Message
+
+#- debug: var=syslog_sleeper
+
+# Retrieve syslogs
+- name: Retrieve syslog messages
+ async_status: jid="{{ syslog_sleeper.ansible_job_id }}"
+ register: job_result
+ until: job_result.finished
+ retries: 30
+ delay: 1
+ connection: local
+
+- debug: msg="{{ syslog_messages }}"
+
+# Reconfigure Rsyslog to its original state
+
+- set_fact:
+ syslog_servers: "{{ original_syslog_servers }}"
+
+- name: Reconfigure Rsyslog to original state
+ template: src=roles/sonic-common/templates/rsyslog.conf.j2
+ dest=/etc/rsyslog.conf
+ become: true
+
+- name: Restart Syslog Daemon
+ become: true
+ service: name=rsyslog state=restarted
+
+
+# Check Messages
+- name: Check syslog messages for the test message
+ set_fact:
+ found1: "true"
+ when: "{{ item | search('Basic Test Message') }}"
+ with_items: "{{ syslog_messages }}"
+
+- fail: msg='Unable to find test syslog "Basic Test Message"'
+ when: found1 is not defined
diff --git a/ansible/roles/test/templates/bgp_neighbor_noshut.j2 b/ansible/roles/test/templates/bgp_neighbor_noshut.j2
new file mode 100644
index 00000000000..fef0f8c4b29
--- /dev/null
+++ b/ansible/roles/test/templates/bgp_neighbor_noshut.j2
@@ -0,0 +1,11 @@
+configure
+router bgp {{ asn }}
+{% if "Arista" in hwsku %}
+no neighbor {{ peer_addr }} shutdown
+{% else %}
+neighbor {{ peer_addr }}
+no shutdown
+{% endif %}
+exit
+exit
+exit
diff --git a/ansible/roles/test/templates/bgp_neighbor_shut.j2 b/ansible/roles/test/templates/bgp_neighbor_shut.j2
new file mode 100644
index 00000000000..499094398eb
--- /dev/null
+++ b/ansible/roles/test/templates/bgp_neighbor_shut.j2
@@ -0,0 +1,11 @@
+configure
+router bgp {{ asn }}
+{% if "Arista" in hwsku %}
+neighbor {{ peer_addr }} shutdown
+{% else %}
+neighbor {{ peer_addr }}
+shutdown
+{% endif %}
+exit
+exit
+exit
diff --git a/ansible/roles/test/templates/neighbor_interface_no_shut.j2 b/ansible/roles/test/templates/neighbor_interface_no_shut.j2
new file mode 100644
index 00000000000..62ff56f7e6f
--- /dev/null
+++ b/ansible/roles/test/templates/neighbor_interface_no_shut.j2
@@ -0,0 +1,9 @@
+enable
+configure
+{% for key, value in ansible_interface_up_down_data_struct_facts[item]['nei_interfaces'].iteritems() %}
+ interface {{ value }}
+ no shutdown
+ exit
+{% endfor %}
+exit
+exit
diff --git a/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2 b/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2
new file mode 100644
index 00000000000..1ce6063c59a
--- /dev/null
+++ b/ansible/roles/test/templates/neighbor_interface_no_shut_single.j2
@@ -0,0 +1,6 @@
+configure
+ interface {{ minigraph_neighbors[item].port }}
+ no shutdown
+ exit
+exit
+exit
diff --git a/ansible/roles/test/templates/neighbor_interface_shut.j2 b/ansible/roles/test/templates/neighbor_interface_shut.j2
new file mode 100644
index 00000000000..43ef20e3174
--- /dev/null
+++ b/ansible/roles/test/templates/neighbor_interface_shut.j2
@@ -0,0 +1,9 @@
+enable
+configure
+{% for key, value in ansible_interface_up_down_data_struct_facts[item]['nei_interfaces'].iteritems() %}
+ interface {{ value }}
+ shutdown
+ exit
+{% endfor %}
+exit
+exit