diff --git a/.azure-pipelines/run-test-template.yml b/.azure-pipelines/run-test-template.yml
index e357ed5d821..4341300a72a 100644
--- a/.azure-pipelines/run-test-template.yml
+++ b/.azure-pipelines/run-test-template.yml
@@ -23,7 +23,7 @@ steps:
source: specific
project: build
pipeline: 1
- artifact: sonic-buildimage.kvm
+ artifact: sonic-buildimage.vs
runVersion: 'latestFromBranch'
runBranch: 'refs/heads/master'
displayName: "Download sonic kvm image"
diff --git a/ansible/config_sonic_basedon_testbed.yml b/ansible/config_sonic_basedon_testbed.yml
index cf9bc37836b..269b1fabfde 100644
--- a/ansible/config_sonic_basedon_testbed.yml
+++ b/ansible/config_sonic_basedon_testbed.yml
@@ -48,6 +48,11 @@
- fail: msg="The DUT you are trying to run test does not belongs to this testbed"
when: inventory_hostname not in testbed_facts['duts']
+ - name: Set default num_asic
+ set_fact:
+ num_asics: 1
+ when: num_asics is not defined
+
- name: Set default dut index
set_fact:
dut_index: "{{ testbed_facts['duts_map'][inventory_hostname]|int }}"
@@ -62,15 +67,15 @@
when: "testbed_facts['vm_base'] != ''"
when: testbed_name is defined
- - topo_facts: topo={{ topo }}
+ - topo_facts: topo={{ topo }} hwsku={{ hwsku }}
delegate_to: localhost
- name: find interface name mapping and individual interface speed if defined from dut
- port_alias: hwsku="{{ hwsku }}"
+ port_alias: hwsku="{{ hwsku }}" num_asic="{{ num_asics }}"
when: deploy is defined and deploy|bool == true
- name: find interface name mapping and individual interface speed if defined with local data
- port_alias: hwsku="{{ hwsku }}"
+ port_alias: hwsku="{{ hwsku }}" num_asic="{{ num_asics }}"
delegate_to: localhost
when: deploy is not defined or deploy|bool == false
@@ -100,10 +105,6 @@
delegate_to: localhost
when: "'dualtor' in topo"
- - name: generate y_cable simulator driver
- include_tasks: dualtor/config_y_cable_simulator.yml
- when: "'dualtor' in topo"
-
- name: set default vm file path
set_fact:
vm_file: veos
@@ -126,6 +127,12 @@
delegate_to: localhost
when: "('host_interfaces_by_dut' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)"
+ - name: find downlink portchannel configuration for T0 topology
+ set_fact:
+ portchannel_config: "{{ vm_topo_config['DUT']['portchannel_config'] | default({})}}"
+ delegate_to: localhost
+ when: "('host_interfaces_by_dut' in vm_topo_config) and ('tor' in vm_topo_config['dut_type'] | lower)"
+
- name: find any tunnel configurations
tunnel_config:
vm_topo_config: "{{ vm_topo_config }}"
@@ -149,6 +156,15 @@
- "{{ interface_to_vms }}"
- "ports"
+ # create map of VM to asic interface names
+ - name: find all interface asic names
+ set_fact:
+ vm_asic_ifnames: "{{ vm_asic_ifnames | default({}) | combine({item.0.name: vm_asic_ifnames[item.0.name]|default([]) + [ front_panel_asic_ifnames[item.1]] }) }}"
+ with_subelements:
+ - "{{ interface_to_vms }}"
+ - "ports"
+ when: front_panel_asic_ifnames != []
+
- name: create minigraph file in ansible minigraph folder
template: src=templates/minigraph_template.j2
dest=minigraph/{{ inventory_hostname}}.{{ topo }}.xml
@@ -352,6 +368,11 @@
- debug: msg={{ docker_status.stdout_lines }}
+ - name: start topology service for multi-asic platform
+ become: true
+ shell: systemctl start topology.service
+ when: start_topo_service is defined and start_topo_service|bool == true
+
- name: execute cli "config load_minigraph -y" to apply new minigraph
become: true
shell: config load_minigraph -y
diff --git a/ansible/devutil/conn_graph_helper.py b/ansible/devutil/conn_graph_helper.py
new file mode 100644
index 00000000000..5d6f3b225f2
--- /dev/null
+++ b/ansible/devutil/conn_graph_helper.py
@@ -0,0 +1,28 @@
+import os
+import inspect
+import sys
+import imp
+
+CONN_GRAPH_LOG = "/tmp/conn_graph_debug.txt"
+
+def get_conn_graph_facts(hostnames):
+ """
+ @summary: Load conn_graph_facts from conn_graph_facts.xml
+ @param hostnames: A list of hostnames
+ @return: A dict, conn_graph_facts
+ """
+ filename = inspect.getframeinfo(inspect.currentframe()).filename
+ ansible_path = os.path.join(os.path.dirname(os.path.abspath(filename)), '../')
+ if ansible_path not in sys.path:
+ sys.path.append(ansible_path)
+
+ utils = imp.load_source('conn_graph_utils', os.path.join(ansible_path, 'library/conn_graph_facts.py'))
+ utils.LAB_GRAPHFILE_PATH = os.path.join(ansible_path, utils.LAB_GRAPHFILE_PATH)
+ utils.debug_fname = CONN_GRAPH_LOG
+
+ lab_graph = utils.find_graph(hostnames=hostnames, part=True)
+ succeed, results = utils.build_results(lab_graph=lab_graph, hostnames=hostnames, ignore_error=True)
+ if not succeed:
+ print("Parse conn graph fails msg = {}".format(results))
+ return {'device_pdu_info': {}, 'device_pdu_links': {}}
+ return results
diff --git a/ansible/devutils b/ansible/devutils
index d0564bb84a4..42738977d24 100755
--- a/ansible/devutils
+++ b/ansible/devutils
@@ -13,15 +13,17 @@ from tabulate import tabulate
from devutil.inv_helpers import HostManager
from devutil.ssh_utils import SSHClient
from devutil.task_runner import TaskRunner
+from devutil import conn_graph_helper
+
import sys
sys.path.append("..")
-from tests.common.connections import ConsoleHost
-from tests.common.plugins.pdu_controller.snmp_pdu_controllers import get_pdu_controller
+from tests.common.connections.console_host import ConsoleHost
+from tests.common.plugins.pdu_controller.pdu_manager import pdu_manager_factory
g_inv_mgr = None
g_task_runner = None
g_pdu_dict = {}
-
+g_conn_graph_facts = {}
def run_cmd(cmd):
'''
@@ -34,34 +36,103 @@ def run_cmd(cmd):
stdout, stderr = out.communicate()
return out.returncode, stdout, stderr
+def get_conn_graph_facts(hosts):
+
+ global g_conn_graph_facts
+ if g_conn_graph_facts:
+ return g_conn_graph_facts
+ hostnames = hosts.keys()
+ g_conn_graph_facts = conn_graph_helper.get_conn_graph_facts(hostnames)
+ return g_conn_graph_facts
def build_global_vars(concurrency, inventory):
global g_task_runner, g_inv_mgr
g_task_runner = TaskRunner(max_worker=concurrency)
g_inv_mgr = HostManager(inventory)
-
def retrieve_hosts(group, limit):
global g_inv_mgr
return g_inv_mgr.get_host_list(group, limit)
-
-def get_pdu_info(pdu_host):
+def get_pdu_info_from_conn_graph(hostname):
+ """
+ Read pdu info from conn graph.
+ Returns a dict like this:
+ {'PSU1': {'Protocol': 'snmp', 'ManagementIp': '10.3.155.107', 'HwSku': 'Sentry', 'Type': 'Pdu', 'peerdevice': 'pdu-107', 'peerport': '39'},
+ 'PSU2': xxx}
+ """
+ global g_inv_mgr, g_conn_graph_facts
+ results = {}
+ if hostname in g_conn_graph_facts['device_pdu_info']:
+ for pdu_idx, pdu_info in g_conn_graph_facts['device_pdu_info'][hostname].items():
+ pdu_name = g_conn_graph_facts['device_pdu_links'][hostname][pdu_idx]['peerdevice']
+ results[pdu_name] = {}
+ results[pdu_name]['ansible_host'] = pdu_info['ManagementIp']
+ results[pdu_name]['protocol'] = pdu_info['Protocol']
+ results[pdu_name]['pdu_port'] = g_conn_graph_facts['device_pdu_links'][hostname][pdu_idx]['peerport']
+ results[pdu_name].update(g_inv_mgr.get_host_vars(pdu_name))
+ return results
+
+
+def get_pdu_info_from_inventory(attrs):
+ """
+ Read pdu info from inventory. This should be a fallback for get_pdu_info_from_conn_graph
+ """
global g_inv_mgr, g_pdu_dict
+ pdus = {}
+ pdu_host = attrs['pdu_host'] if 'pdu_host' in attrs else None
+ if not pdu_host:
+ return (False, 'DUT has no PDU configuration')
if pdu_host in g_pdu_dict:
- return g_pdu_dict[pdu_host]
+ return (True, g_pdu_dict[pdu_host])
hosts = retrieve_hosts('all', pdu_host)
- pdus=[]
- g_pdu_dict[pdu_host] = pdus
+ if not hosts:
+ return (False, 'PDU not found in inventory')
+
+ g_pdu_dict[pdu_host] = hosts
for ph in pdu_host.split(','):
if ph in hosts:
pdu = hosts[ph]
- pdus.append(pdu)
-
- return pdus
-
+ pdus[ph] = pdu
+ return (True, pdus)
+
+def get_pdu_info(dut_hostname, attrs):
+ results = get_pdu_info_from_conn_graph(dut_hostname)
+ if results:
+ return (True, results)
+ return get_pdu_info_from_inventory(attrs)
+
+def get_console_info_from_conn_graph(hostname):
+ """
+ Read console info from conn_graph_facts.
+ """
+ console_info = {}
+ if hostname in g_conn_graph_facts['device_console_info'] and g_conn_graph_facts['device_console_info'][hostname]:
+ console_info['console_type'] = g_conn_graph_facts['device_console_info'][hostname]['Protocol']
+ console_info['console_host'] = g_conn_graph_facts['device_console_info'][hostname]['ManagementIp']
+ console_info['console_port'] = g_conn_graph_facts['device_console_link'][hostname]['ConsolePort']['peerport']
+ return console_info
+
+def get_console_info_from_inventory(attrs):
+ """
+ Read console info from inventory file. This should be a fallback for get_console_info_from_conn_graph.
+ """
+ console_info = {}
+ keys = ['console_type', 'console_host', 'console_port']
+ for k in keys:
+ if k in attrs:
+ console_info[k] = attrs[k]
+ return console_info
+
+def get_console_info(hostname, attrs):
+ console_info = get_console_info_from_conn_graph(hostname)
+ if not console_info:
+ console_info = get_console_info_from_inventory(attrs)
+ if not console_info:
+ print("Failed to get console info for {}".format(hostname))
+ return console_info
def show_data_output(header, data, json_output=False):
if json_output:
@@ -79,7 +150,7 @@ def action_list(parameters):
data.append(dict(zip(header, (name, vars['ansible_host']))))
else:
for name, vars in hosts.items():
- data.append((name, vars['ansible_host']))
+ data.append((name, vars['ansible_host'] if 'ansible_host' in vars else 'not_available'))
show_data_output(header, data, parameters['json'])
@@ -92,7 +163,8 @@ def action_ping(parameters):
g_task_runner.submit_task(name + '|' + vars['ansible_host'], run_cmd, cmd=cmd)
if parameters['json']:
for name, result in g_task_runner.task_results():
- data.append(dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
+ data.append(
+ dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
else:
for name, result in g_task_runner.task_results():
data.append((name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))
@@ -104,7 +176,8 @@ def action_ping(parameters):
g_task_runner.submit_task(name + '|' + vars['ansible_hostv6'], run_cmd, cmd=cmd)
if parameters['json']:
for name, result in g_task_runner.task_results():
- data.append(dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
+ data.append(
+ dict(zip(header, (name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))))
else:
for name, result in g_task_runner.task_results():
data.append((name.split('|')[0], name.split('|')[1], 'Success' if result['result'][0] == 0 else "Fail"))
@@ -116,65 +189,50 @@ def action_ssh(parameters):
hosts = parameters['hosts']
for _, vars in hosts.items():
client = SSHClient()
- client.connect(hostname=vars['ansible_host'], username=vars['creds']['username'], passwords=vars['creds']['password'])
+ client.connect(hostname=vars['ansible_host'], username=vars[
+ 'creds']['username'], passwords=vars['creds']['password'])
client.posix_shell()
def action_console(parameters):
hosts = parameters['hosts']
- # Todo: Retrieve console vars from conn_graph_fact
- for _, vars in hosts.items():
- console_host = ConsoleHost(console_type=vars['console_type'],
- console_host=vars['console_host'],
- console_port=vars['console_port'],
- sonic_username=vars['creds']['username'],
- sonic_password=vars['creds']['password'],
- console_username=vars['creds']['console_user'][vars['console_type']],
- console_password=vars['creds']['console_password'][vars['console_type']])
+ for hostname, vars in hosts.items():
+ console_info = get_console_info(hostname, vars)
+ if not console_info:
+ continue
+ console_host = ConsoleHost(console_type=console_info['console_type'],
+ console_host=console_info['console_host'],
+ console_port=console_info['console_port'],
+ sonic_username=vars['creds']['username'],
+ sonic_password=vars['creds']['password'],
+ console_username=vars['creds']['console_user'][console_info['console_type']],
+ console_password=vars['creds']['console_password'][console_info['console_type']])
console_host.posix_shell()
def pdu_action_on_dut(host, attrs, action):
- ret = { 'Host' : host, 'PDU status' : [], 'Summary' : [], 'Action' : action }
- pdu_name = attrs['pdu_host'] if 'pdu_host' in attrs else None
- if not pdu_name:
- ret['Summary'].append('DUT has no PDU configuration')
+ ret = {'Host': host, 'PDU status': [], 'Summary': [], 'Action': action}
+ succeed, pdu_info = get_pdu_info(host, attrs)
+ if not succeed:
+ ret['Summary'] = pdu_info
return ret
+ pduman = pdu_manager_factory(host, pdu_info, g_conn_graph_facts, pdu_info.values()[0])
- pdu_list = get_pdu_info(pdu_name)
- if not pdu_list:
- ret['Summary'].append('PDU not found in inventory')
+ if not pduman:
+ ret['Summary'].append('Failed to communicate with PDU controller {}'.format(pdu_info.keys()))
return ret
- for pdu_info in pdu_list:
- pdu_host = pdu_info['ansible_host'] if pdu_info and 'ansible_host' in pdu_info else None
- p_name = pdu_info['inventory_hostname'] if pdu_info and 'inventory_hostname' in pdu_info else None
- if not pdu_host or not p_name:
- ret['Summary'].append('No PDU IP or name')
- continue
-
- controller = get_pdu_controller(pdu_host, host, pdu_info)
-
- if not controller:
- ret['Summary'].append('Failed to communicate with controller {}'.format(p_name))
- continue
-
- status = controller.get_outlet_status()
- if action == 'off':
- for outlet in status:
- controller.turn_off_outlet(outlet['outlet_id'])
- status = controller.get_outlet_status()
- elif action == 'on':
- for outlet in status:
- controller.turn_on_outlet(outlet['outlet_id'])
- status = controller.get_outlet_status()
- elif action != 'status':
- ret['Summary'].append('Unsupported action {}.'.format(action))
- continue
+ if action == 'off':
+ pduman.turn_off_outlet()
+ elif action == 'on':
+ pduman.turn_on_outlet()
+ elif action != 'status':
+ ret['Summary'].append('Unsupported action {}.'.format(action))
+ return ret
- for outlet in status:
- outlet.update({ 'PDU' : p_name, 'PDU_IP' : pdu_host })
- ret['PDU status'].append(outlet)
+ status = pduman.get_outlet_status()
+ for outlet in status:
+ ret['PDU status'].append(outlet)
return ret
@@ -182,7 +240,7 @@ def pdu_action_on_dut(host, attrs, action):
def action_pdu(parameters, action):
hosts = parameters['hosts']
data = []
- header = [ 'Host', 'Action', 'PDU status', 'Summary' ]
+ header = ['Host', 'Action', 'PDU status', 'Summary']
for host, attrs in hosts.items():
g_task_runner.submit_task(host, pdu_action_on_dut, host=host, attrs=attrs, action=action)
@@ -191,8 +249,8 @@ def action_pdu(parameters, action):
if parameters['json']:
data.append(status)
else:
- data.append([ status[x] for x in header ])
-
+ data.append([status[x] for x in header])
+
return header, data
@@ -246,17 +304,20 @@ def parallel_run(parameters):
for name, result in g_task_runner.task_results():
print("task result for {} ===============>\n{}".format(name, str(result['result'][1])))
+
def ssh_run_command(hostname, username, passwords, cmd):
client = SSHClient()
client.connect(hostname=hostname, username=username, passwords=passwords)
return client.run_command(cmd)
+
def validate_args(args):
if args.action == 'run' and args.cmd == '':
print("command is missing for run action")
return False
return True
+
def main():
parser = argparse.ArgumentParser(description='Device utilities')
parser.add_argument('-6', '--ipv6', help='Include IPv6', action='store_true',
@@ -275,7 +336,8 @@ def main():
type=str, required=False)
parser.add_argument('-u', '--user', help='User: user account to login to host with, default admin',
type=str, required=False, default='admin')
- parser.add_argument('-c', '--concurrency', help='Concurrency: the max concurrency for tasks that can run simultaneously, default 1',
+ parser.add_argument(
+ '-c', '--concurrency', help='Concurrency: the max concurrency for tasks that can run simultaneously, default 1',
type=int, required=False, default=1)
parser.add_argument('-j', '--json', help='json output', action='store_true',
required=False, default=False)
@@ -283,26 +345,27 @@ def main():
args = parser.parse_args()
if not validate_args(args):
return
- build_global_vars(args.concurrency, args.inventory);
+ build_global_vars(args.concurrency, args.inventory)
hosts = retrieve_hosts(args.group, args.limit)
- actions = { 'list' : action_list,
- 'ping' : action_ping,
- 'ssh' : action_ssh,
- 'console' : action_console,
- 'run' : ssh_run_command,
- 'pdu_status' : action_pdu_status,
- 'pdu_off' : action_pdu_off,
- 'pdu_on' : action_pdu_on,
- 'pdu_reboot' : action_pdu_reboot,
- }
- parameters = { 'hosts' : hosts,
- 'limit' : args.limit,
- 'action' : actions[args.action],
- 'user' : args.user,
- 'ipv6' : args.ipv6,
- 'cmd': args.cmd,
- 'json' : args.json,
- }
+ get_conn_graph_facts(hosts)
+ actions = {'list': action_list,
+ 'ping': action_ping,
+ 'ssh': action_ssh,
+ 'console': action_console,
+ 'run': ssh_run_command,
+ 'pdu_status': action_pdu_status,
+ 'pdu_off': action_pdu_off,
+ 'pdu_on': action_pdu_on,
+ 'pdu_reboot': action_pdu_reboot,
+ }
+ parameters = {'hosts': hosts,
+ 'limit': args.limit,
+ 'action': actions[args.action],
+ 'user': args.user,
+ 'ipv6': args.ipv6,
+ 'cmd': args.cmd,
+ 'json': args.json,
+ }
action_dispatcher(parameters)
diff --git a/ansible/dualtor/config_y_cable_simulator.yml b/ansible/dualtor/config_y_cable_simulator.yml
deleted file mode 100644
index 2b9b096c1db..00000000000
--- a/ansible/dualtor/config_y_cable_simulator.yml
+++ /dev/null
@@ -1,30 +0,0 @@
----
-- fail: msg="There must be two duts in this testbed"
- when: "testbed_facts['duts']|length != 2"
-
-- fail: msg="The type of testbed must be dualtor"
- when: "'dualtor' not in testbed_facts['topo']"
-
-- fail: msg="The DUT you are trying to run test does not belongs to this testbed"
- when: inventory_hostname not in testbed_facts['duts']
-
-- name: get host server address
- vmhost_server_info: vmhost_server_name={{ testbed_facts['server'] }} vm_file='veos'
- delegate_to: localhost
-
-- name: set y cable simulator server address
- set_fact:
- mux_simulator_server: "{{ vmhost_server_address }}"
-
-- name: set default y cable simulator server port
- set_fact:
- mux_simulator_port: 8080
- when: mux_simulator_port is not defined
-
-- name: generate y cable simulator driver for {{ dut_name }}
- include_tasks: "dualtor/y_cable_simulator_injector.yml"
- loop: "{{ testbed_facts['duts'] }}"
- loop_control:
- loop_var: dut_name
- run_once: True
-
diff --git a/ansible/dualtor/y_cable_simulator_client.j2 b/ansible/dualtor/y_cable_simulator_client.j2
deleted file mode 100644
index 77ed1d5a968..00000000000
--- a/ansible/dualtor/y_cable_simulator_client.j2
+++ /dev/null
@@ -1,156 +0,0 @@
-from urllib import request, error
-import json
-from sonic_py_common import logger
-
-DUTS_MAP = {
- "{{ dual_tor_facts['positions']['upper'] }}": 0,
- "{{ dual_tor_facts['positions']['lower'] }}": 1
-}
-
-VM_SET = "{{ testbed_facts['group-name'] }}"
-
-DUT_NAME = "{{ dut_name }}"
-
-BASE_URL = "http://{{ mux_simulator_server }}:{{ mux_simulator_port }}/"
-
-SYSLOG_IDENTIFIER = "y_cable_sim"
-helper_logger = logger.Logger(SYSLOG_IDENTIFIER)
-
-UPPER_TOR = "upper_tor"
-LOWER_TOR = "lower_tor"
-
-def _url(physical_port):
- """
- Helper function to build an url for given physical_port
-
- Args:
- physical_port: physical port on switch, an integer starting from 1
- Returns:
- str: The url for post/get.
- """
- return BASE_URL + "/mux/{}/{}".format(VM_SET, physical_port - 1)
-
-def _post(physical_port, data):
- """
- Helper function for posting data to y_cable server.
-
- Args:
- physical_port: physical port on switch, an integer starting from 1
- data: data to post
- Returns:
- True if succeed. False otherwise
- """
- data = json.dumps(data).encode(encoding='utf-8')
- header = {'Accept': 'application/json', 'Content-Type': 'application/json'}
- req = request.Request(url=_url(physical_port), data=data, headers=header)
- try:
- _ = request.urlopen(req)
- except error.HTTPError as e:
- try:
- err_msg = json.loads(e.read().decode())['err_msg']
- helper_logger.log_warning("post request returns err. status_code = {} err_msg = {}".format(e.code, err_msg))
- except Exception:
- helper_logger.log_warning("post request returns err. status_code = {}".format(e.code))
- return False
- except error.URLError as e:
- helper_logger.log_warning("post request returns err. err_msg = {}".format(str(e)))
- return False
- return True
-
-def _get(physical_port):
- """
- Helper function for polling status from y_cable server.
-
- Args:
- physical_port: physical port on switch, an integer starting from 1
- Returns:
- dict: A dict decoded from server's response.
- None: Returns None is error is detected.
- """
- req = request.Request(url=_url(physical_port))
- try:
- res = request.urlopen(req)
- data = res.read()
- return json.loads(data)
- except error.HTTPError as e:
- err_msg = json.loads(e.read().decode())['err_msg']
- helper_logger.log_warning("get request returns err. status_code = {} err_msg = {}".format(e.code, err_msg))
- except error.URLError as e:
- helper_logger.log_warning("get request returns err. err_msg = {}".format(str(e)))
- except json.decoder.JSONDecodeError as e:
- helper_logger.log_warning("failed to parse response as json. err_msg = {}".format(str(e)))
- except Exception as e:
- helper_logger.log_warning("get request returns err. err_msg = {}".format(str(e)))
- return None
-
-def _toggle_to(physical_port, target):
- """
- Helper function for toggling to certain TOR.
-
- Args:
- physical_port: physical port on switch, an integer starting from 1
- target: UPPER_TOR / LOWER_TOR
- Returns:
- True if succeed. False otherwise
- """
- data = {"active_side": target}
- helper_logger.log_info("physical_port {} toggle to {}".format(physical_port, target))
- return _post(physical_port, data)
-
-def _get_side(physical_port):
- """
- Retrieve the current active tor from y_cable simulator server.
- Args:
- physical_port: physical port on switch, an integer starting from 1
- Returns:
- 1 if UPPER_TOR is active
- 2 if LOWER_TOR is active
- -1 for exception or inconstient status
- """
- res = _get(physical_port)
- if not res:
- return -1
- active_side = res["active_side"]
- if active_side == UPPER_TOR:
- return 1
- elif active_side == LOWER_TOR:
- return 2
- else:
- return -1
-
-def toggle_mux_to_torA(physical_port):
- return _toggle_to(physical_port, UPPER_TOR)
-
-def toggle_mux_to_torB(physical_port):
- return _toggle_to(physical_port, LOWER_TOR)
-
-def check_read_side(physical_port):
- return DUTS_MAP[DUT_NAME] + 1
-
-def check_mux_direction(physical_port):
- return _get_side(physical_port)
-
-def check_active_linked_tor_side(physical_port):
- return _get_side(physical_port)
-
-def check_if_link_is_active_for_NIC(physical_port):
- """
- Checks if NIC side of the Y cable's link is active.
- Always return True for now because all links in simulator are active.
- """
- return True
-
-def check_if_link_is_active_for_torA(physical_port):
- """
- Checks if UPPER_TOR side of the Y cable's link is active.
- Always return True for now because all links in simulator are active.
- """
- return True
-
-def check_if_link_is_active_for_torB(physical_port):
- """
- Checks if LOWER_TOR side of the Y cable's link is active.
- Always return True for now because all links in simulator are active.
- """
- return True
-
diff --git a/ansible/dualtor/y_cable_simulator_injector.yml b/ansible/dualtor/y_cable_simulator_injector.yml
deleted file mode 100644
index 30a1ec32663..00000000000
--- a/ansible/dualtor/y_cable_simulator_injector.yml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-- name: generate y_cable_simulator driver for {{ dut_name }}
- template: src=dualtor/y_cable_simulator_client.j2
- dest=/tmp/y_cable_simulator_client.py
- delegate_to: "{{ dut_name }}"
-
-- name: inject y_cable_simulator to pmon container for {{ dut_name }}
- shell: docker cp /tmp/y_cable_simulator_client.py pmon:/usr/lib/python3/dist-packages
- delegate_to: "{{ dut_name }}"
-
diff --git a/ansible/files/creategraph.py b/ansible/files/creategraph.py
index 5db0a55b5e0..2068daeedf8 100755
--- a/ansible/files/creategraph.py
+++ b/ansible/files/creategraph.py
@@ -8,6 +8,8 @@
DEFAULT_DEVICECSV = 'sonic_lab_devices.csv'
DEFAULT_LINKCSV = 'sonic_lab_links.csv'
+DEFAULT_CONSOLECSV = 'sonic_lab_console_links.csv'
+DEFAULT_PDUCSV = 'sonic_lab_pdu_links.csv'
LAB_CONNECTION_GRAPH_ROOT_NAME = 'LabConnectionGraph'
LAB_CONNECTION_GRAPH_DPGL2_NAME = 'DevicesL2Info'
@@ -20,45 +22,87 @@ class LabGraph(object):
infrastucture for Sonic development and testing environment.
"""
- def __init__(self, dev_csvfile=None, link_csvfile=None, graph_xmlfile=None):
+ def __init__(self, dev_csvfile=None, link_csvfile=None, cons_csvfile=None, pdu_csvfile=None, graph_xmlfile=None):
#TODO:make generated xml file name as parameters in the future to make it more flexible
self.devices = []
self.links = []
+ self.consoles = []
+ self.pdus = []
self.devcsv = dev_csvfile
self.linkcsv = link_csvfile
+ self.conscsv = cons_csvfile
+ self.pducsv = pdu_csvfile
self.png_xmlfile = 'str_sonic_png.xml'
self.dpg_xmlfile = 'str_sonic_dpg.xml'
self.one_xmlfile = graph_xmlfile
self.pngroot = etree.Element('PhysicalNetworkGraphDeclaration')
self.dpgroot = etree.Element('DataPlaneGraph')
+ self.csgroot = etree.Element('ConsoleGraphDeclaration')
+ self.pcgroot = etree.Element('PowerControlGraphDeclaration')
def read_devices(self):
- csv_dev = open(self.devcsv)
- csv_devices = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_dev))
- devices_root = etree.SubElement(self.pngroot, 'Devices')
- for row in csv_devices:
- attrs = {}
- self.devices.append(row)
- for key in row:
- if key.lower() != 'managementip':
- attrs[key]=row[key].decode('utf-8')
- prod = etree.SubElement(devices_root, 'Device', attrs)
- csv_dev.close()
+ with open(self.devcsv) as csv_dev:
+ csv_devices = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_dev))
+ devices_root = etree.SubElement(self.pngroot, 'Devices')
+ pdus_root = etree.SubElement(self.pcgroot, 'DevicesPowerControlInfo')
+ cons_root = etree.SubElement(self.csgroot, 'DevicesConsoleInfo')
+ for row in csv_devices:
+ attrs = {}
+ self.devices.append(row)
+ devtype=row['Type'].lower()
+ if 'pdu' in devtype:
+ for key in row:
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(pdus_root, 'DevicePowerControlInfo', attrs)
+ elif 'consoleserver' in devtype:
+ for key in row:
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(cons_root, 'DeviceConsoleInfo', attrs)
+ else:
+ for key in row:
+ if key.lower() != 'managementip' and key.lower() !='protocol':
+ attrs[key]=row[key].decode('utf-8')
+ etree.SubElement(devices_root, 'Device', attrs)
def read_links(self):
- csv_file = open(self.linkcsv)
- csv_links = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_file))
- links_root = etree.SubElement(self.pngroot, 'DeviceInterfaceLinks')
- for link in csv_links:
- attrs = {}
- for key in link:
- if key.lower() != 'vlanid' and key.lower() != 'vlanmode':
- attrs[key]=link[key].decode('utf-8')
- prod = etree.SubElement(links_root, 'DeviceInterfaceLink', attrs)
- self.links.append(link)
- csv_file.close()
+ with open(self.linkcsv) as csv_file:
+ csv_links = csv.DictReader(filter(lambda row: row[0]!='#' and len(row.strip())!=0, csv_file))
+ links_root = etree.SubElement(self.pngroot, 'DeviceInterfaceLinks')
+ for link in csv_links:
+ attrs = {}
+ for key in link:
+ if key.lower() != 'vlanid' and key.lower() != 'vlanmode':
+ attrs[key]=link[key].decode('utf-8')
+ etree.SubElement(links_root, 'DeviceInterfaceLink', attrs)
+ self.links.append(link)
+ def read_consolelinks(self):
+ if not os.path.exists(self.conscsv):
+ return
+ with open(self.conscsv) as csv_file:
+ csv_cons = csv.DictReader(csv_file)
+ conslinks_root = etree.SubElement(self.csgroot, 'ConsoleLinksInfo')
+ for cons in csv_cons:
+ attrs = {}
+ for key in cons:
+ attrs[key]=cons[key].decode('utf-8')
+ etree.SubElement(conslinks_root, 'ConsoleLinkInfo', attrs)
+ self.consoles.append(cons)
+
+ def read_pdulinks(self):
+ if not os.path.exists(self.pducsv):
+ return
+ with open(self.pducsv) as csv_file:
+ csv_pdus = csv.DictReader(csv_file)
+ pduslinks_root = etree.SubElement(self.pcgroot, 'PowerControlLinksInfo')
+ for pdu_link in csv_pdus:
+ attrs = {}
+ for key in pdu_link:
+ attrs[key]=pdu_link[key].decode('utf-8')
+ etree.SubElement(pduslinks_root, 'PowerControlLinkInfo', attrs)
+ self.pdus.append(pdu_link)
+
def generate_dpg(self):
for dev in self.devices:
hostname = dev.get('Hostname', '')
@@ -99,21 +143,40 @@ def create_xml(self):
root=etree.Element(LAB_CONNECTION_GRAPH_ROOT_NAME)
root.append(self.pngroot)
root.append(self.dpgroot)
+ root.append(self.csgroot)
+ root.append(self.pcgroot)
result = etree.tostring(root, pretty_print=True)
onexml.write(result)
+def get_file_names(args):
+ if not args.inventory:
+ device, links, console, pdu = args.device, args.links, args.console, args.pdu
+ else:
+ device = 'sonic_{}_devices.csv'.format(args.inventory)
+ links = 'sonic_{}_links.csv'.format(args.inventory)
+ console = 'sonic_{}_console_links.csv'.format(args.inventory)
+ pdu = 'sonic_{}_pdu_links.csv'.format(args.inventory)
+
+ return device, links, console, pdu
+
def main():
parser = argparse.ArgumentParser()
- parser.add_argument("-d", "--device", help="device file", default=DEFAULT_DEVICECSV)
- parser.add_argument("-l", "--links", help="link file", default=DEFAULT_LINKCSV)
+ parser.add_argument("-d", "--device", help="device file [deprecation warning: use -i instead]", default=DEFAULT_DEVICECSV)
+ parser.add_argument("-l", "--links", help="link file [deprecation warning: use -i instead]", default=DEFAULT_LINKCSV)
+ parser.add_argument("-c", "--console", help="console connection file [deprecation warning: use -i instead]", default=DEFAULT_CONSOLECSV)
+ parser.add_argument("-p", "--pdu", help="pdu connection file [deprecation warning: use -i instead]", default=DEFAULT_PDUCSV)
+ parser.add_argument("-i", "--inventory", help="specify inventory name to generate device/link/console/pdu file names, default none", default=None)
parser.add_argument("-o", "--output", help="output xml file", required=True)
args = parser.parse_args()
- mygraph = LabGraph(args.device, args.links, args.output)
+ device, links, console, pdu = get_file_names(args)
+ mygraph = LabGraph(device, links, console, pdu, args.output)
mygraph.read_devices()
mygraph.read_links()
+ mygraph.read_consolelinks()
+ mygraph.read_pdulinks()
mygraph.generate_dpg()
mygraph.create_xml()
diff --git a/ansible/files/lab_connection_graph.xml b/ansible/files/lab_connection_graph.xml
index 885b21a8fd8..a4f4e48afdf 100644
--- a/ansible/files/lab_connection_graph.xml
+++ b/ansible/files/lab_connection_graph.xml
@@ -90,4 +90,30 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/ansible/files/sonic_lab_console_links.csv b/ansible/files/sonic_lab_console_links.csv
new file mode 100644
index 00000000000..c0914c54044
--- /dev/null
+++ b/ansible/files/sonic_lab_console_links.csv
@@ -0,0 +1,4 @@
+StartDevice,StartPort,EndDevice
+console-1,10,str-msn2700-01
+console-1,11,str-7260-10
+console-1,12,str-7260-11
\ No newline at end of file
diff --git a/ansible/files/sonic_lab_devices.csv b/ansible/files/sonic_lab_devices.csv
index b11a58d53d3..5ee6bf8f45e 100644
--- a/ansible/files/sonic_lab_devices.csv
+++ b/ansible/files/sonic_lab_devices.csv
@@ -1,5 +1,8 @@
-Hostname,ManagementIp,HwSku,Type
-str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic
-str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf
-str-7260-11,10.251.0.234/23,Arista-7260QX-64,FanoutRoot
-str-acs-serv-01,10.251.0.245/23,TestServ,Server
+Hostname,ManagementIp,HwSku,Type,Protocol
+str-msn2700-01,10.251.0.188/23,Mellanox-2700,DevSonic,
+str-7260-10,10.251.0.13/23,Arista-7260QX-64,FanoutLeaf,
+str-7260-11,10.251.0.234/23,Arista-7260QX-64,FanoutRoot,
+str-acs-serv-01,10.251.0.245/23,TestServ,Server,
+pdu-1,192.168.9.2,Apc,Pdu,snmp
+pdu-2,192.168.9.3,Sentry,Pdu,snmp
+console-1,192.168.10.1,Cisco,ConsoleServer,ssh
\ No newline at end of file
diff --git a/ansible/files/sonic_lab_pdu_links.csv b/ansible/files/sonic_lab_pdu_links.csv
new file mode 100644
index 00000000000..8574ca0d5f5
--- /dev/null
+++ b/ansible/files/sonic_lab_pdu_links.csv
@@ -0,0 +1,9 @@
+StartDevice,StartPort,EndDevice,EndPort
+pdu-1,1,str-msn2700-01,PSU1
+pdu-1,3,str-msn2700-01,PSU2
+pdu-1,2,str-7260-10,PSU1
+pdu-1,4,str-7260-10,PSU2
+pdu-2,1,str-7260-11,PSU1
+pdu-2,3,str-7260-11,PSU2
+pdu-2,2,str-acs-serv-01,PSU1
+pdu-2,4,str-acs-serv-01,PSU2
\ No newline at end of file
diff --git a/ansible/group_vars/vm_host/main.yml b/ansible/group_vars/vm_host/main.yml
index edbd360ef4c..b7315889568 100644
--- a/ansible/group_vars/vm_host/main.yml
+++ b/ansible/group_vars/vm_host/main.yml
@@ -1,7 +1,9 @@
+supported_vm_types: [ "veos", "sonic", "ceos" ]
root_path: veos-vm
vm_images_url: https://acsbe.blob.core.windows.net/vmimages
cd_image_filename: Aboot-veos-serial-8.0.0.iso
hdd_image_filename: vEOS-lab-4.20.15M.vmdk
+sonic_image_filename: sonic-vs.img
skip_image_downloading: false
vm_console_base: 7000
diff --git a/ansible/lab b/ansible/lab
index 806ea9bd3b4..172649a1a9a 100644
--- a/ansible/lab
+++ b/ansible/lab
@@ -11,6 +11,7 @@ all:
sonic_s6100:
sonic_a7260:
sonic_multi_asic:
+ sonic_multi_asic_2:
fanout:
hosts:
str-7260-10:
@@ -94,17 +95,46 @@ sonic_s6100:
sonic_a7260:
vars:
+ hwsku: Arista-7260CX3-C64
iface_speed: 100000
hosts:
lab-a7260-01:
ansible_host: 10.251.0.191
- hwsku: Arista-7260CX3-D108C8
+ model: DCS-7260CX3-64
+ serial: SSJ12345678
+ base_mac: 76:34:ab:08:cd:a0
+ syseeprom_info:
+ "0x21": "DCS-7260CX3-64"
+ "0x22": "ASY0250504B0"
+ "0x23": "SSJ12345678"
+ "0x24": "7634ab08cda0"
+ "0x25": "2017/06/07 12:36:05"
+ "0x26": "01"
+ "0x27": "03.00"
+ "0x28": "x86_64-arista_7260cx3_64"
+ "0x2d": "Arista Networks"
+ "0x2e": "Aboot-norcal7-7.2.3-pcie2x4-12345678"
sonic_multi_asic:
vars:
hwsku: msft_multi_asic_vs
iface_speed: 40000
+ num_asics: 6
+ start_topo_service: True
+ frontend_asics: [0,1,2,3]
hosts:
vlab-07:
ansible_host: 10.250.0.109
- ansible_hostv6: fec0::ffff:afa:7
+ ansible_hostv6: fec0::ffff:afa:9
+
+sonic_multi_asic_2:
+ vars:
+ hwsku: msft_four_asic_vs
+ iface_speed: 40000
+ num_asics: 4
+ start_topo_service: True
+ frontend_asics: [0,1]
+ hosts:
+ vlab-08:
+ ansible_host: 10.250.0.112
+ ansible_hostv6: fec0::ffff:afa:c
diff --git a/ansible/roles/vm_set/library/announce_routes.py b/ansible/library/announce_routes.py
similarity index 100%
rename from ansible/roles/vm_set/library/announce_routes.py
rename to ansible/library/announce_routes.py
diff --git a/ansible/library/config_facts.py b/ansible/library/config_facts.py
index e46cbc5c5fa..13c2f0fee42 100644
--- a/ansible/library/config_facts.py
+++ b/ansible/library/config_facts.py
@@ -74,7 +74,7 @@ def create_maps(config):
for idx, val in enumerate(port_name_list_sorted):
port_index_map[val] = idx
- port_name_to_alias_map = { name : v['alias'] for name, v in config["PORT"].iteritems()}
+ port_name_to_alias_map = { name : v['alias'] if 'alias' in v else '' for name, v in config["PORT"].iteritems()}
# Create inverse mapping between port name and alias
port_alias_to_name_map = {v: k for k, v in port_name_to_alias_map.iteritems()}
diff --git a/ansible/library/conn_graph_facts.py b/ansible/library/conn_graph_facts.py
index dbeca320a9b..66f4612d24d 100755
--- a/ansible/library/conn_graph_facts.py
+++ b/ansible/library/conn_graph_facts.py
@@ -9,9 +9,16 @@
from itertools import groupby
from collections import defaultdict
from natsort import natsorted
-from ansible.module_utils.port_utils import get_port_alias_to_name_map
-from ansible.module_utils.debug_utils import create_debug_file, print_debug_msg
+try:
+ from ansible.module_utils.port_utils import get_port_alias_to_name_map
+ from ansible.module_utils.debug_utils import create_debug_file, print_debug_msg
+except ImportError:
+ # Add parent dir for using outside Ansible
+ import sys
+ sys.path.append('..')
+ from module_utils.port_utils import get_port_alias_to_name_map
+ from module_utils.debug_utils import create_debug_file, print_debug_msg
DOCUMENTATION='''
module: conn_graph_facts.py
@@ -50,6 +57,10 @@
device_vlan_range: all configured vlan range for the device(host)
device_port_vlans: detailed vlanids for each physical port and switchport mode
server_links: each server port vlan ids
+ device_console_info: The device's console server type, mgmtip, hwsku and protocol
+ device_console_link: The console server port connected to the device
+ device_pdu_info: The device's pdu server type, mgmtip, hwsku and protocol
+ device_pdu_links: The pdu server ports connected to the device
'''
@@ -94,6 +105,9 @@
'''
+debug_fname = None
+
+
class Parse_Lab_Graph():
"""
Parse the generated lab physical connection graph and insert Ansible fact of the graph
@@ -112,9 +126,13 @@ def __init__(self, xmlfile):
self.vlanport = {}
self.vlanrange = {}
self.links = {}
+ self.consolelinks = {}
+ self.pdulinks = {}
self.server = defaultdict(dict)
self.pngtag = 'PhysicalNetworkGraphDeclaration'
self.dpgtag = 'DataPlaneGraph'
+ self.pcgtag = 'PowerControlGraphDeclaration'
+ self.csgtag = 'ConsoleGraphDeclaration'
def port_vlanlist(self, vlanrange):
vlans = []
@@ -182,6 +200,75 @@ def parse_graph(self):
self.links[start_dev][link.attrib['StartPort']] = {'peerdevice':link.attrib['EndDevice'], 'peerport': link.attrib['EndPort'], 'speed': link.attrib['BandWidth']}
if end_dev:
self.links[end_dev][link.attrib['EndPort']] = {'peerdevice': link.attrib['StartDevice'], 'peerport': link.attrib['StartPort'], 'speed': link.attrib['BandWidth']}
+ console_root = self.root.find(self.csgtag)
+ if console_root:
+ devicecsgroot = console_root.find('DevicesConsoleInfo')
+ devicescsg = devicecsgroot.findall('DeviceConsoleInfo')
+ if devicescsg is not None:
+ for dev in devicescsg:
+ hostname = dev.attrib['Hostname']
+ if hostname is not None:
+ deviceinfo[hostname] = {}
+ hwsku = dev.attrib['HwSku']
+ devtype = dev.attrib['Type']
+ protocol = dev.attrib['Protocol']
+ mgmt_ip = dev.attrib['ManagementIp']
+ deviceinfo[hostname]['HwSku'] = hwsku
+ deviceinfo[hostname]['Type'] = devtype
+ deviceinfo[hostname]['Protocol'] = protocol
+ deviceinfo[hostname]['ManagementIp'] = mgmt_ip
+ self.consolelinks[hostname] = {}
+ console_link_root = console_root.find('ConsoleLinksInfo')
+ if console_link_root:
+ allconsolelinks = console_link_root.findall('ConsoleLinkInfo')
+ if allconsolelinks is not None:
+ for consolelink in allconsolelinks:
+ start_dev = consolelink.attrib['StartDevice']
+ end_dev = consolelink.attrib['EndDevice']
+ if start_dev:
+ if start_dev not in self.consolelinks:
+ self.consolelinks.update({start_dev : {}})
+ self.consolelinks[start_dev][consolelink.attrib['StartPort']] = {'peerdevice':consolelink.attrib['EndDevice'], 'peerport': 'ConsolePort'}
+ if end_dev:
+ if end_dev not in self.consolelinks:
+ self.consolelinks.update({end_dev : {}})
+ self.consolelinks[end_dev]['ConsolePort'] = {'peerdevice': consolelink.attrib['StartDevice'], 'peerport': consolelink.attrib['StartPort']}
+
+ pdu_root = self.root.find(self.pcgtag)
+ if pdu_root:
+ devicepcgroot = pdu_root.find('DevicesPowerControlInfo')
+ devicespcsg = devicepcgroot.findall('DevicePowerControlInfo')
+ if devicespcsg is not None:
+ for dev in devicespcsg:
+ hostname = dev.attrib['Hostname']
+ if hostname is not None:
+ deviceinfo[hostname] = {}
+ hwsku = dev.attrib['HwSku']
+ devtype = dev.attrib['Type']
+ protocol = dev.attrib['Protocol']
+ mgmt_ip = dev.attrib['ManagementIp']
+ deviceinfo[hostname]['HwSku'] = hwsku
+ deviceinfo[hostname]['Type'] = devtype
+ deviceinfo[hostname]['Protocol'] = protocol
+ deviceinfo[hostname]['ManagementIp'] = mgmt_ip
+ self.pdulinks[hostname] = {}
+ pdu_link_root = pdu_root.find('PowerControlLinksInfo')
+ if pdu_link_root:
+ allpdulinks = pdu_link_root.findall('PowerControlLinkInfo')
+ if allpdulinks is not None:
+ for pdulink in allpdulinks:
+ start_dev = pdulink.attrib['StartDevice']
+ end_dev = pdulink.attrib['EndDevice']
+ print_debug_msg(debug_fname, "pdulink {}".format(pdulink.attrib))
+ print_debug_msg(debug_fname, "self.pdulinks {}".format(self.pdulinks))
+ if start_dev:
+ if start_dev not in self.pdulinks:
+ self.pdulinks.update({start_dev : {}})
+ self.pdulinks[start_dev][pdulink.attrib['StartPort']] = {'peerdevice':pdulink.attrib['EndDevice'], 'peerport': pdulink.attrib['EndPort']}
+ if end_dev:
+ if end_dev not in self.pdulinks:
+ self.pdulinks.update({end_dev : {}})
+ self.pdulinks[end_dev][pdulink.attrib['EndPort']] = {'peerdevice': pdulink.attrib['StartDevice'], 'peerport': pdulink.attrib['StartPort']}
self.devices = deviceinfo
self.vlanport = devicel2info
@@ -245,8 +332,71 @@ def get_host_connections(self, hostname):
"""
return self.links.get(hostname)
- def contains_hosts(self, hostnames):
- return set(hostnames) <= set(self.devices)
+ def contains_hosts(self, hostnames, part):
+ if not part:
+ return set(hostnames) <= set(self.devices)
+ # It's possible that not all devices are found in connect_graph when using in devutil
+ THRESHOLD = 0.8
+ count = 0
+ for hostname in hostnames:
+ if hostname in self.devices.keys():
+ count += 1
+ return hostnames and (count * 1.0 / len(hostnames) >= THRESHOLD)
+
+
+ def get_host_console_info(self, hostname):
+ """
+ return the given hostname console info of mgmtip, protocol, hwsku and type
+ """
+ if hostname in self.devices:
+ try:
+ ret = self.devices[self.consolelinks[hostname]['ConsolePort']['peerdevice']]
+ except KeyError:
+ ret = {}
+ return ret
+ else:
+ """
+ Please be noted that an empty dict is returned when hostname is not found
+ The behavior is different with get_host_vlan. devutils script will check if the returned dict
+ is empty to determine if console info exists for given hostname.
+ """
+ return {}
+
+ def get_host_console_link(self, hostname):
+ """
+ return the given hostname console link info of console server and port
+ """
+ if hostname in self.consolelinks:
+ return self.consolelinks[hostname]
+ else:
+ # Please be noted that an empty dict is returned when hostname is not found
+ return {}
+
+ def get_host_pdu_info(self, hostname):
+ """
+ return the given hostname pdu info of mgmtip, protocol, hwsku and type
+ """
+ if hostname in self.devices:
+ ret = {}
+ for key in ['PSU1', 'PSU2']:
+ try:
+ ret.update({key : self.devices[self.pdulinks[hostname][key]['peerdevice']]})
+ except KeyError:
+ pass
+ return ret
+ else:
+ # Please be noted that an empty dict is returned when hostname is not found
+ return {}
+
+ def get_host_pdu_links(self, hostname):
+ """
+ return the given hostname pdu links info of pdu servers and ports
+ """
+ if hostname in self.pdulinks:
+ return self.pdulinks[hostname]
+ else:
+ # Please be noted that an empty dict is returned when hostname is not found
+ return {}
LAB_CONNECTION_GRAPH_FILE = 'graph_files.yml'
@@ -254,14 +404,16 @@ def contains_hosts(self, hostnames):
LAB_GRAPHFILE_PATH = 'files/'
-def find_graph(hostnames):
+def find_graph(hostnames, part=False):
"""
Find a graph file contains all devices in testbed.
duts are spcified by hostnames
Parameters:
hostnames: list of duts in the target testbed.
+ part: select the graph file if over 80% of hosts are found in conn_graph when part is True
"""
+ global debug_fname
filename = os.path.join(LAB_GRAPHFILE_PATH, LAB_CONNECTION_GRAPH_FILE)
with open(filename) as fd:
file_list = yaml.safe_load(fd)
@@ -273,10 +425,9 @@ def find_graph(hostnames):
lab_graph = Parse_Lab_Graph(filename)
lab_graph.parse_graph()
print_debug_msg(debug_fname, "For file %s, got hostnames %s" % (fn, lab_graph.devices))
- if lab_graph.contains_hosts(hostnames):
+ if lab_graph.contains_hosts(hostnames, part):
print_debug_msg(debug_fname, ("Returning lab graph from conn graph file: %s for hosts %s" % (fn, hostnames)))
return lab_graph
-
# Fallback to return an empty connection graph, this is
# needed to bridge the kvm test needs. The KVM test needs
# A graph file, which used to be whatever hardcoded file.
@@ -299,9 +450,66 @@ def get_port_name_list(hwsku):
port_name_list_sorted = natsorted(port_name_list)
return port_name_list_sorted
-
-debug_fname = None
-
+def build_results(lab_graph, hostnames, ignore_error=False):
+ """
+ Refactor code for building json results.
+ Code is refactored because same logic is needed in devutil
+ """
+ device_info = {}
+ device_conn = {}
+ device_port_vlans = {}
+ device_vlan_range = {}
+ device_vlan_list = {}
+ device_vlan_map_list = {}
+ device_console_info = {}
+ device_console_link = {}
+ device_pdu_info = {}
+ device_pdu_links = {}
+ msg = {}
+ for hostname in hostnames:
+ dev = lab_graph.get_host_device_info(hostname)
+ if dev is None and not ignore_error:
+ msg = "cannot find info for %s" % hostname
+ return (False, msg)
+ device_info[hostname] = dev
+ device_conn[hostname] = lab_graph.get_host_connections(hostname)
+ host_vlan = lab_graph.get_host_vlan(hostname)
+ port_vlans = lab_graph.get_host_port_vlans(hostname)
+ # for multi-DUTs, must ensure all have vlan configured.
+ if host_vlan:
+ device_vlan_range[hostname] = host_vlan["VlanRange"]
+ device_vlan_list[hostname] = host_vlan["VlanList"]
+ if dev["Type"].lower() != "devsonic":
+ device_vlan_map_list[hostname] = host_vlan["VlanList"]
+ else:
+ device_vlan_map_list[hostname] = {}
+
+ port_name_list_sorted = get_port_name_list(dev['HwSku'])
+ print_debug_msg(debug_fname, "For %s with hwsku %s, port_name_list is %s" % (hostname, dev['HwSku'], port_name_list_sorted))
+ for a_host_vlan in host_vlan["VlanList"]:
+ # Get the corresponding port for this vlan from the port vlan list for this hostname
+ found_port_for_vlan = False
+ for a_port in port_vlans:
+ if a_host_vlan in port_vlans[a_port]['vlanlist']:
+ if a_port in port_name_list_sorted:
+ port_index = port_name_list_sorted.index(a_port)
+ device_vlan_map_list[hostname][port_index] = a_host_vlan
+ found_port_for_vlan = True
+ break
+ elif not ignore_error:
+ msg = "Did not find port for %s in the ports based on hwsku '%s' for host %s" % (a_port, dev['HwSku'], hostname)
+ return (False, msg)
+ if not found_port_for_vlan and not ignore_error:
+ msg = "Did not find corresponding link for vlan %d in %s for host %s" % (a_host_vlan, port_vlans, hostname)
+ return (False, msg)
+ device_port_vlans[hostname] = port_vlans
+ device_console_info[hostname] = lab_graph.get_host_console_info(hostname)
+ device_console_link[hostname] = lab_graph.get_host_console_link(hostname)
+ device_pdu_info[hostname] = lab_graph.get_host_pdu_info(hostname)
+ device_pdu_links[hostname] = lab_graph.get_host_pdu_links(hostname)
+ results = {k: v for k, v in locals().items()
+ if (k.startswith("device_") and v)}
+ return (True, results)
def main():
module = AnsibleModule(
@@ -357,51 +565,11 @@ def main():
'device_port_vlans': lab_graph.vlanport,
}
module.exit_json(ansible_facts=results)
-
- device_info = {}
- device_conn = {}
- device_port_vlans = {}
- device_vlan_range = {}
- device_vlan_list = {}
- device_vlan_map_list = {}
- for hostname in hostnames:
- dev = lab_graph.get_host_device_info(hostname)
- if dev is None:
- module.fail_json(msg="cannot find info for %s" % hostname)
- device_info[hostname] = dev
- device_conn[hostname] = lab_graph.get_host_connections(hostname)
- host_vlan = lab_graph.get_host_vlan(hostname)
- port_vlans = lab_graph.get_host_port_vlans(hostname)
- # for multi-DUTs, must ensure all have vlan configured.
- if host_vlan:
- device_vlan_range[hostname] = host_vlan["VlanRange"]
- device_vlan_list[hostname] = host_vlan["VlanList"]
- if dev["Type"].lower() != "devsonic":
- device_vlan_map_list[hostname] = host_vlan["VlanList"]
- else:
- device_vlan_map_list[hostname] = {}
-
- port_name_list_sorted = get_port_name_list(dev['HwSku'])
- print_debug_msg(debug_fname, "For %s with hwsku %s, port_name_list is %s" % (hostname, dev['HwSku'], port_name_list_sorted))
- for a_host_vlan in host_vlan["VlanList"]:
- # Get the corresponding port for this vlan from the port vlan list for this hostname
- found_port_for_vlan = False
- for a_port in port_vlans:
- if a_host_vlan in port_vlans[a_port]['vlanlist']:
- if a_port in port_name_list_sorted:
- port_index = port_name_list_sorted.index(a_port)
- device_vlan_map_list[hostname][port_index] = a_host_vlan
- found_port_for_vlan = True
- break
- else:
- module.fail_json(msg="Did not find port for %s in the ports based on hwsku '%s' for host %s" % (a_port, dev['HwSku'], hostname))
- if not found_port_for_vlan:
- module.fail_json(msg="Did not find corresponding link for vlan %d in %s for host %s" % (a_host_vlan, port_vlans, hostname))
- device_port_vlans[hostname] = port_vlans
- results = {k: v for k, v in locals().items()
- if (k.startswith("device_") and v)}
-
- module.exit_json(ansible_facts=results)
+ succeed, results = build_results(lab_graph, hostnames)
+ if succeed:
+ module.exit_json(ansible_facts=results)
+ else:
+ module.fail_json(msg=results)
except (IOError, OSError):
module.fail_json(msg="Can not find lab graph file under {}".format(LAB_GRAPHFILE_PATH))
except Exception as e:
diff --git a/ansible/library/minigraph_facts.py b/ansible/library/minigraph_facts.py
index 1dfee77a7cf..400de569438 100644
--- a/ansible/library/minigraph_facts.py
+++ b/ansible/library/minigraph_facts.py
@@ -4,6 +4,7 @@
import sys
import socket
import struct
+import traceback
import json
import copy
import ipaddr as ipaddress
@@ -222,7 +223,6 @@ def parse_dpg(dpg, hname):
mgmt_intf = {'addr': ipaddr, 'alias': intfname, 'prefixlen': prefix_len, 'mask': ipmask, 'gwaddr': gwaddr}
pcintfs = child.find(str(QName(ns, "PortChannelInterfaces")))
- pc_intfs = []
pcs = {}
for pcintf in pcintfs.findall(str(QName(ns, "PortChannel"))):
pcintfname = pcintf.find(str(QName(ns, "Name"))).text
@@ -233,12 +233,11 @@ def parse_dpg(dpg, hname):
ports[port_alias_to_name_map[member]] = {'name': port_alias_to_name_map[member], 'alias': member}
pcs[pcintfname] = {'name': pcintfname, 'members': pcmbr_list}
fallback_node = pcintf.find(str(QName(ns, "Fallback")))
- if fallback_node is not None:
+ if fallback_node is not None:
pcs[pcintfname]['fallback'] = fallback_node.text
- ports.pop(pcintfname)
+ ports.pop(pcintfname, None)
vlanintfs = child.find(str(QName(ns, "VlanInterfaces")))
- vlan_intfs = []
dhcp_servers = []
vlans = {}
for vintf in vlanintfs.findall(str(QName(ns, "VlanInterface"))):
@@ -253,6 +252,9 @@ def parse_dpg(dpg, hname):
vlandhcpservers = ""
dhcp_servers = vlandhcpservers.split(";")
for i, member in enumerate(vmbr_list):
+ # Skip PortChannel inside Vlan
+ if member in pcs:
+ continue
vmbr_list[i] = port_alias_to_name_map[member]
ports[port_alias_to_name_map[member]] = {'name': port_alias_to_name_map[member], 'alias': member}
vlan_attributes = {'name': vintfname, 'members': vmbr_list, 'vlanid': vlanid}
@@ -590,8 +592,9 @@ def main():
results_clean = json.loads(json.dumps(results, cls=minigraph_encoder))
module.exit_json(ansible_facts=results_clean)
except Exception as e:
+ tb = traceback.format_exc()
# all attempts to find a minigraph failed.
- module.fail_json(msg=e.message)
+ module.fail_json(msg=e.message + "\n" + tb)
def print_parse_xml(hostname):
diff --git a/ansible/library/port_alias.py b/ansible/library/port_alias.py
index 68359cd6d92..9e07ccc630f 100755
--- a/ansible/library/port_alias.py
+++ b/ansible/library/port_alias.py
@@ -94,6 +94,11 @@ def get_portmap(self, asic_id=None):
portmap = {}
aliasmap = {}
portspeed = {}
+ # Front end interface asic names
+ front_panel_asic_ifnames = []
+ # All asic names
+ asic_if_names = []
+
filename = self.get_portconfig_path(asic_id)
if filename is None:
raise Exception("Something wrong when trying to find the portmap file, either the hwsku is not available or file location is not correct")
@@ -102,6 +107,7 @@ def get_portmap(self, asic_id=None):
alias_index = -1
speed_index = -1
role_index = -1
+ asic_name_index = -1
while len(lines) != 0:
line = lines.pop(0)
if re.match('^#', line):
@@ -115,6 +121,8 @@ def get_portmap(self, asic_id=None):
speed_index = index
if 'role' in text:
role_index = index
+ if 'asic_port_name' in text:
+ asic_name_index = index
else:
#added support to parse recycle port
if re.match('^Ethernet', line) or re.match('^Inband', line):
@@ -134,8 +142,14 @@ def get_portmap(self, asic_id=None):
aliasmap[alias] = name
if (speed_index != -1) and (len(mapping) > speed_index):
portspeed[alias] = mapping[speed_index]
+ if (asic_name_index != -1) and (len(mapping) > asic_name_index):
+ asicifname = mapping[asic_name_index]
+ front_panel_asic_ifnames.append(asicifname)
+ if (asic_name_index != -1) and (len(mapping) > asic_name_index):
+ asicifname = mapping[asic_name_index]
+ asic_if_names.append(asicifname)
- return (aliases, portmap, aliasmap, portspeed)
+ return (aliases, portmap, aliasmap, portspeed, front_panel_asic_ifnames, asic_if_names)
def main():
module = AnsibleModule(
@@ -152,6 +166,11 @@ def main():
aliasmap = {}
portspeed = {}
allmap = SonicPortAliasMap(m_args['hwsku'])
+ # ASIC interface names of front panel interfaces
+ front_panel_asic_ifnames = []
+ # { asic_name: [ asic interfaces] }
+ asic_if_names = {}
+
# When this script is invoked on sonic-mgmt docker, num_asic
# parameter is passed.
if m_args['num_asic'] is not None:
@@ -166,7 +185,7 @@ def main():
for asic_id in range(num_asic):
if num_asic == 1:
asic_id = None
- (aliases_asic, portmap_asic, aliasmap_asic, portspeed_asic) = allmap.get_portmap(asic_id)
+ (aliases_asic, portmap_asic, aliasmap_asic, portspeed_asic, front_panel_asic, asicifnames_asic) = allmap.get_portmap(asic_id)
if aliases_asic is not None:
aliases.extend(aliases_asic)
if portmap_asic is not None:
@@ -175,10 +194,17 @@ def main():
aliasmap.update(aliasmap_asic)
if portspeed_asic is not None:
portspeed.update(portspeed_asic)
+ if front_panel_asic is not None:
+ front_panel_asic_ifnames.extend(front_panel_asic)
+ if asicifnames_asic is not None:
+ asic = 'ASIC' + str(asic_id)
+ asic_if_names[asic] = asicifnames_asic
module.exit_json(ansible_facts={'port_alias': aliases,
'port_name_map': portmap,
'port_alias_map': aliasmap,
- 'port_speed': portspeed})
+ 'port_speed': portspeed,
+ 'front_panel_asic_ifnames': front_panel_asic_ifnames,
+ 'asic_if_names': asic_if_names})
except (IOError, OSError), e:
fail_msg = "IO error" + str(e)
module.fail_json(msg=fail_msg)
diff --git a/ansible/library/reduce_and_add_sonic_images.py b/ansible/library/reduce_and_add_sonic_images.py
index fa1fc92e9a0..526b9bdeee4 100644
--- a/ansible/library/reduce_and_add_sonic_images.py
+++ b/ansible/library/reduce_and_add_sonic_images.py
@@ -56,10 +56,10 @@ def download_new_sonic_image(module, new_image_url, save_as):
cmd="rm -f {}".format(save_as),
msg="clean up previously downloaded image",
ignore_error=True)
-
- exec_command(module,
- cmd="curl -o {} {}".format(save_as, new_image_url),
- msg="downloading new image")
+ if new_image_url:
+ exec_command(module,
+ cmd="curl -o {} {}".format(save_as, new_image_url),
+ msg="downloading new image")
if path.exists(save_as):
_, out, _ = exec_command(module, cmd="sonic_installer binary_version {}".format(save_as))
diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py
index 606f8582e79..af64cfd2906 100644
--- a/ansible/library/snmp_facts.py
+++ b/ansible/library/snmp_facts.py
@@ -206,6 +206,9 @@ def __init__(self,dotprefix=False):
# Memory Check
self.sysTotalMemery = dp + "1.3.6.1.4.1.2021.4.5.0"
self.sysTotalFreeMemery = dp + "1.3.6.1.4.1.2021.4.6.0"
+ self.sysTotalSharedMemory = dp + "1.3.6.1.4.1.2021.4.13.0"
+ self.sysTotalBuffMemory = dp + "1.3.6.1.4.1.2021.4.14.0"
+ self.sysCachedMemory = dp + "1.3.6.1.4.1.2021.4.15.0"
# From Cisco private MIB (PFC and queue counters)
self.cpfcIfRequests = dp + "1.3.6.1.4.1.9.9.813.1.1.1.1" # + .ifindex
@@ -217,6 +220,10 @@ def __init__(self,dotprefix=False):
# From Cisco private MIB (PSU)
self.cefcFRUPowerOperStatus = dp + "1.3.6.1.4.1.9.9.117.1.1.2.1.2" # + .psuindex
+ # ipCidrRouteTable MIB
+ self.ipCidrRouteEntry = dp + "1.3.6.1.2.1.4.24.4.1.1.0.0.0.0.0.0.0.0.0" # + .next hop IP
+ self.ipCidrRouteStatus = dp + "1.3.6.1.2.1.4.24.4.1.16.0.0.0.0.0.0.0.0.0" # + .next hop IP
+
def decode_hex(hexstring):
if len(hexstring) < 3:
@@ -866,18 +873,43 @@ def main():
psuIndex = int(current_oid.split('.')[-1])
results['snmp_psu'][psuIndex]['operstatus'] = current_val
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((m_args['host'], 161)),
+ cmdgen.MibVariable(p.ipCidrRouteEntry,),
+ cmdgen.MibVariable(p.ipCidrRouteStatus,),
+ )
+
+ if errorIndication:
+ module.fail_json(msg=str(errorIndication) + ' querying CidrRouteTable')
+
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ if v.ipCidrRouteEntry in current_oid:
+ # extract next hop ip from oid
+ next_hop = current_oid.split(v.ipCidrRouteEntry + ".")[1]
+ results['snmp_cidr_route'][next_hop]['route_dest'] = current_val
+ if v.ipCidrRouteStatus in current_oid:
+ next_hop = current_oid.split(v.ipCidrRouteStatus + ".")[1]
+ results['snmp_cidr_route'][next_hop]['status'] = current_val
+
if not m_args['is_eos']:
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
snmp_auth,
cmdgen.UdpTransportTarget((m_args['host'], 161)),
cmdgen.MibVariable(p.sysTotalMemery,),
cmdgen.MibVariable(p.sysTotalFreeMemery,),
+ cmdgen.MibVariable(p.sysTotalSharedMemory,),
+ cmdgen.MibVariable(p.sysTotalBuffMemory,),
+ cmdgen.MibVariable(p.sysCachedMemory,),
lookupMib=False, lexicographicMode=False
)
-
+
if errorIndication:
module.fail_json(msg=str(errorIndication) + ' querying system infomation.')
-
+
for oid, val in varBinds:
current_oid = oid.prettyPrint()
current_val = val.prettyPrint()
@@ -885,6 +917,12 @@ def main():
results['ansible_sysTotalMemery'] = decode_type(module, current_oid, val)
elif current_oid == v.sysTotalFreeMemery:
results['ansible_sysTotalFreeMemery'] = decode_type(module, current_oid, val)
+ elif current_oid == v.sysTotalSharedMemory:
+ results['ansible_sysTotalSharedMemory'] = decode_type(module, current_oid, val)
+ elif current_oid == v.sysTotalBuffMemory:
+ results['ansible_sysTotalBuffMemory'] = decode_type(module, current_oid, val)
+ elif current_oid == v.sysCachedMemory:
+ results['ansible_sysCachedMemory'] = decode_type(module, current_oid, val)
module.exit_json(ansible_facts=results)
diff --git a/ansible/library/test_facts.py b/ansible/library/test_facts.py
index c50d02ff599..e2c7ddf26a9 100644
--- a/ansible/library/test_facts.py
+++ b/ansible/library/test_facts.py
@@ -28,13 +28,20 @@
'''
EXAMPLES = '''
- Testbed CSV file example:
+ Testbed CSV file example - deprecated:
# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,Tests ptf
vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
...
+ Testbed CSV file example - recommended:
+ # conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_file,auto_recover,comment
+ ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Tests ptf
+ vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+ vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+ ...
+
Testcases YAML File example:
testcases:
acl:
@@ -98,7 +105,8 @@
class ParseTestbedTopoinfo():
"""Parse the testbed file used to describe whole testbed info"""
- TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment')
def __init__(self, testbed_file):
self.testbed_filename = testbed_file
@@ -113,13 +121,17 @@ def _cidr_to_ip_mask(network):
def _read_testbed_topo_from_csv():
"""Read csv testbed info file."""
with open(self.testbed_filename) as f:
- topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS,
- delimiter=',')
-
- # Validate all field are in the same order and are present
- header = next(topo)
- for field in self.TESTBED_FIELDS:
- assert header[field].replace('#', '').strip() == field
+ header = [field.strip(' #') for field in f.readline().strip().split(',')]
+ if len(header) == len(self.TESTBED_FIELDS_DEPRECATED):
+ testbed_fields = self.TESTBED_FIELDS_DEPRECATED
+ elif len(header) == len(self.TESTBED_FIELDS_RECOMMENDED):
+ testbed_fields = self.TESTBED_FIELDS_RECOMMENDED
+ else:
+ raise ValueError('Unsupported testbed fields %s' % str(header))
+ for header_field, expect_field in zip(header, testbed_fields):
+ assert header_field == expect_field
+
+ topo = csv.DictReader(f, fieldnames=testbed_fields, delimiter=',')
for line in topo:
if line['conf-name'].lstrip().startswith('#'):
@@ -133,7 +145,7 @@ def _read_testbed_topo_from_csv():
_cidr_to_ip_mask(line["ptf_ipv6"])
line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';')
- line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']}
+ line['duts_map'] = {dut: line['duts'].index(dut) for dut in line['duts']}
del line['dut']
self.testbed_topo[line['conf-name']] = line
diff --git a/ansible/library/topo_facts.py b/ansible/library/topo_facts.py
index c6f2dfd86fc..5a3d9bee642 100644
--- a/ansible/library/topo_facts.py
+++ b/ansible/library/topo_facts.py
@@ -66,8 +66,92 @@ class ParseTestbedTopoinfo():
'''
def __init__(self):
self.vm_topo_config = {}
-
- def get_topo_config(self, topo_name):
+ self.asic_topo_config = {}
+
+ def parse_topo_defintion(self, topo_definition, po_map, dut_num, neigh_type='VMs'):
+ dut_asn = topo_definition['configuration_properties']['common']['dut_asn']
+ vmconfig = dict()
+ for vm in topo_definition['topology'][neigh_type]:
+ vmconfig[vm] = dict()
+ vmconfig[vm]['intfs'] = [[] for i in range(dut_num)]
+ if 'properties' in vmconfig[vm]:
+ vmconfig[vm]['properties']=topo_definition['configuration'][vm]['properties']
+ if neigh_type == 'VMs':
+ vmconfig[vm]['interface_indexes'] = [[] for i in range(dut_num)]
+ for vlan in topo_definition['topology'][neigh_type][vm]['vlans']:
+ (dut_index, vlan_index, _) = parse_vm_vlan_port(vlan)
+ vmconfig[vm]['interface_indexes'][dut_index].append(vlan_index)
+ if neigh_type == 'NEIGH_ASIC':
+ vmconfig[vm]['asic_intfs'] = [[] for i in range(dut_num)]
+ dut_index = 0
+ for asic_intf in topo_definition['topology'][neigh_type][vm]['asic_intfs']:
+ vmconfig[vm]['asic_intfs'][dut_index].append(asic_intf)
+
+ # physical interface
+ for intf in topo_definition['configuration'][vm]['interfaces']:
+ if (neigh_type == 'VMs' and 'Ethernet' in intf) or \
+ (neigh_type == 'NEIGH_ASIC' and re.match("Eth(\d+)-", intf)):
+ dut_index = 0
+ if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]:
+ dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index']
+ if 'lacp' in topo_definition['configuration'][vm]['interfaces'][intf]:
+ po_map[topo_definition['configuration'][vm]['interfaces'][intf]['lacp']] = dut_index
+
+ vmconfig[vm]['intfs'][dut_index].append(intf)
+
+ # ip interface
+ vmconfig[vm]['ip_intf'] = [None] * dut_num
+ vmconfig[vm]['peer_ipv4'] = [None] * dut_num
+ vmconfig[vm]['ipv4mask'] = [None] * dut_num
+ vmconfig[vm]['peer_ipv6'] = [None] * dut_num
+ vmconfig[vm]['ipv6mask'] = [None] * dut_num
+
+
+ for intf in topo_definition['configuration'][vm]['interfaces']:
+ dut_index = 0
+ if (neigh_type == 'VMs' and 'Ethernet' in intf) or \
+ (neigh_type == 'NEIGH_ASIC' and re.match("Eth(\d+)-", intf)):
+ if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]:
+ dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index']
+ elif 'Port-Channel' in intf:
+ m = re.search("(\d+)", intf)
+ dut_index = po_map[int(m.group(1))]
+
+ if 'ipv4' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()):
+ (peer_ipv4, ipv4_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv4'].split('/')
+ vmconfig[vm]['peer_ipv4'][dut_index] = peer_ipv4
+ vmconfig[vm]['ipv4mask'][dut_index] = ipv4_mask
+ vmconfig[vm]['ip_intf'][dut_index] = intf
+ if 'ipv6' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()):
+ (ipv6_addr, ipv6_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv6'].split('/')
+ vmconfig[vm]['peer_ipv6'][dut_index] = ipv6_addr.upper()
+ vmconfig[vm]['ipv6mask'][dut_index] = ipv6_mask
+ vmconfig[vm]['ip_intf'][dut_index] = intf
+
+ # bgp
+ vmconfig[vm]['bgp_ipv4'] = [None] * dut_num
+ vmconfig[vm]['bgp_ipv6'] = [None] * dut_num
+ vmconfig[vm]['bgp_asn'] = topo_definition['configuration'][vm]['bgp']['asn']
+ for ipstr in topo_definition['configuration'][vm]['bgp']['peers'][dut_asn]:
+ ip = ipaddress.ip_address(ipstr.decode('utf8'))
+ for dut_index in range(0, dut_num):
+ if ip.version == 4:
+ # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index
+ if vmconfig[vm]['peer_ipv4'][dut_index]:
+ ipsubnet_str = vmconfig[vm]['peer_ipv4'][dut_index]+'/'+vmconfig[vm]['ipv4mask'][dut_index]
+ ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8'))
+ if ip in ipsubnet.network:
+ vmconfig[vm]['bgp_ipv4'][dut_index] = ipstr.upper()
+ elif ip.version == 6:
+ # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index
+ if vmconfig[vm]['peer_ipv6'][dut_index]:
+ ipsubnet_str = vmconfig[vm]['peer_ipv6'][dut_index]+'/'+vmconfig[vm]['ipv6mask'][dut_index]
+ ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8'))
+ if ip in ipsubnet.network:
+ vmconfig[vm]['bgp_ipv6'][dut_index] = ipstr.upper()
+ return vmconfig
+
+ def get_topo_config(self, topo_name, hwsku):
CLET_SUFFIX = "-clet"
if 'ptf32' in topo_name:
@@ -76,7 +160,9 @@ def get_topo_config(self, topo_name):
topo_name = 't1-64'
topo_name = re.sub(CLET_SUFFIX + "$", "", topo_name)
topo_filename = 'vars/topo_' + topo_name + '.yml'
+ asic_topo_filename = 'vars/topo_' + hwsku + '.yml'
vm_topo_config = dict()
+ asic_topo_config = dict()
po_map = [None] * 16 # maximum 16 port channel interfaces
### read topology definition
@@ -86,6 +172,12 @@ def get_topo_config(self, topo_name):
with open(topo_filename) as f:
topo_definition = yaml.load(f)
+ if not os.path.isfile(asic_topo_filename):
+ asic_definition = {}
+ else:
+ with open(asic_topo_filename) as f:
+ asic_definition = yaml.load(f)
+
### parse topo file specified in vars/ to reverse as dut config
dut_num = 1
if 'dut_num' in topo_definition['topology']:
@@ -96,78 +188,18 @@ def get_topo_config(self, topo_name):
dut_asn = topo_definition['configuration_properties']['common']['dut_asn']
vm_topo_config['dut_asn'] = dut_asn
vm_topo_config['dut_type'] = topo_definition['configuration_properties']['common']['dut_type']
- vmconfig = dict()
- for vm in topo_definition['topology']['VMs']:
- vmconfig[vm] = dict()
- vmconfig[vm]['intfs'] = [[] for i in range(dut_num)]
- vmconfig[vm]['properties']=topo_definition['configuration'][vm]['properties']
- vmconfig[vm]['interface_indexes'] = [[] for i in range(dut_num)]
- for vlan in topo_definition['topology']['VMs'][vm]['vlans']:
- (dut_index, vlan_index, _) = parse_vm_vlan_port(vlan)
- vmconfig[vm]['interface_indexes'][dut_index].append(vlan_index)
+ vm_topo_config['vm'] = self.parse_topo_defintion(topo_definition, po_map, dut_num, 'VMs')
- # physical interface
- for intf in topo_definition['configuration'][vm]['interfaces']:
- if 'Ethernet' in intf:
- dut_index = 0
- if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]:
- dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index']
- if 'lacp' in topo_definition['configuration'][vm]['interfaces'][intf]:
- po_map[topo_definition['configuration'][vm]['interfaces'][intf]['lacp']] = dut_index
+ for asic in asic_definition:
+ po_map_asic = [None] * 16 # maximum 16 port channel interfaces
+ asic_topo_config[asic] = dict()
+ asic_topo_config[asic]['dut_asn'] = asic_definition[asic]['configuration_properties']['common']['dut_asn']
+ asic_topo_config[asic]['asic_type'] = asic_definition[asic]['configuration_properties']['common']['asic_type']
+ asic_topo_config[asic]['Loopback4096'] = []
+ for lo4096 in asic_definition[asic]['configuration_properties']['common']['Loopback4096']:
+ asic_topo_config[asic]['Loopback4096'].append(lo4096)
- vmconfig[vm]['intfs'][dut_index].append(intf)
-
- # ip interface
- vmconfig[vm]['ip_intf'] = [None] * dut_num
- vmconfig[vm]['peer_ipv4'] = [None] * dut_num
- vmconfig[vm]['ipv4mask'] = [None] * dut_num
- vmconfig[vm]['peer_ipv6'] = [None] * dut_num
- vmconfig[vm]['ipv6mask'] = [None] * dut_num
-
-
- for intf in topo_definition['configuration'][vm]['interfaces']:
- dut_index = 0
- if 'Ethernet' in intf:
- if 'dut_index' in topo_definition['configuration'][vm]['interfaces'][intf]:
- dut_index = topo_definition['configuration'][vm]['interfaces'][intf]['dut_index']
- elif 'Port-Channel' in intf:
- m = re.search("(\d+)", intf)
- dut_index = po_map[int(m.group(1))]
-
- if 'ipv4' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()):
- (peer_ipv4, ipv4_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv4'].split('/')
- vmconfig[vm]['peer_ipv4'][dut_index] = peer_ipv4
- vmconfig[vm]['ipv4mask'][dut_index] = ipv4_mask
- vmconfig[vm]['ip_intf'][dut_index] = intf
- if 'ipv6' in topo_definition['configuration'][vm]['interfaces'][intf] and ('loopback' not in intf.lower()):
- (ipv6_addr, ipv6_mask) = topo_definition['configuration'][vm]['interfaces'][intf]['ipv6'].split('/')
- vmconfig[vm]['peer_ipv6'][dut_index] = ipv6_addr.upper()
- vmconfig[vm]['ipv6mask'][dut_index] = ipv6_mask
- vmconfig[vm]['ip_intf'][dut_index] = intf
-
- # bgp
- vmconfig[vm]['bgp_ipv4'] = [None] * dut_num
- vmconfig[vm]['bgp_ipv6'] = [None] * dut_num
- vmconfig[vm]['bgp_asn'] = topo_definition['configuration'][vm]['bgp']['asn']
- for ipstr in topo_definition['configuration'][vm]['bgp']['peers'][dut_asn]:
- ip = ipaddress.ip_address(ipstr.decode('utf8'))
- for dut_index in range(0, dut_num):
- if ip.version == 4:
- # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index
- if vmconfig[vm]['peer_ipv4'][dut_index]:
- ipsubnet_str = vmconfig[vm]['peer_ipv4'][dut_index]+'/'+vmconfig[vm]['ipv4mask'][dut_index]
- ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8'))
- if ip in ipsubnet.network:
- vmconfig[vm]['bgp_ipv4'][dut_index] = ipstr.upper()
- elif ip.version == 6:
- # Each VM might not be connected to all the DUT's, so check if this VM is a peer to DUT at dut_index
- if vmconfig[vm]['peer_ipv6'][dut_index]:
- ipsubnet_str = vmconfig[vm]['peer_ipv6'][dut_index]+'/'+vmconfig[vm]['ipv6mask'][dut_index]
- ipsubnet = ipaddress.ip_interface(ipsubnet_str.decode('utf8'))
- if ip in ipsubnet.network:
- vmconfig[vm]['bgp_ipv6'][dut_index] = ipstr.upper()
-
- vm_topo_config['vm'] = vmconfig
+ asic_topo_config[asic]['neigh_asic'] = self.parse_topo_defintion(asic_definition[asic], po_map_asic, 1, 'NEIGH_ASIC')
vm_topo_config['host_interfaces_by_dut'] = [[] for i in range(dut_num)]
if 'host_interfaces' in topo_definition['topology']:
@@ -191,22 +223,26 @@ def get_topo_config(self, topo_name):
vm_topo_config['DUT'] = {}
self.vm_topo_config = vm_topo_config
- return vm_topo_config
+ self.asic_topo_config = asic_topo_config
+ return vm_topo_config, asic_topo_config
def main():
module = AnsibleModule(
argument_spec=dict(
topo=dict(required=True, default=None),
+ hwsku=dict(required=True, default=None),
),
supports_check_mode=True
)
m_args = module.params
topo_name = m_args['topo']
+ hwsku = m_args['hwsku']
try:
topoinfo = ParseTestbedTopoinfo()
- vm_topo_config = topoinfo.get_topo_config(topo_name)
- module.exit_json(ansible_facts={'vm_topo_config': vm_topo_config})
+ vm_topo_config, asic_topo_config = topoinfo.get_topo_config(topo_name, hwsku)
+ module.exit_json(ansible_facts={'vm_topo_config': vm_topo_config,
+ 'asic_topo_config': asic_topo_config})
except (IOError, OSError):
module.fail_json(msg="Can not find topo file for %s" % topo_name)
except Exception as e:
diff --git a/ansible/library/vlan_config.py b/ansible/library/vlan_config.py
index ba3b25eca57..a100eeaf595 100644
--- a/ansible/library/vlan_config.py
+++ b/ansible/library/vlan_config.py
@@ -53,6 +53,7 @@ def main():
vlan_configs[vlan]['prefix'] = vlan_param['prefix']
vlan_configs[vlan]['prefix_v6'] = vlan_param['prefix_v6']
vlan_configs[vlan]['intfs'] = [port_alias[i] for i in vlan_param['intfs']]
+ vlan_configs[vlan]['portchannels'] = vlan_param.get('portchannels', [])
if 'mac' in vlan_param:
vlan_configs[vlan]['mac'] = vlan_param['mac']
diff --git a/ansible/minigraph/vlab-08.t1-8-lag.xml b/ansible/minigraph/vlab-08.t1-8-lag.xml
new file mode 100644
index 00000000000..1964edc8f78
--- /dev/null
+++ b/ansible/minigraph/vlab-08.t1-8-lag.xml
@@ -0,0 +1,1581 @@
+
+
+
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.0
+ ASIC0
+ 10.1.0.1
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.4
+ ASIC1
+ 10.1.0.5
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.2
+ ASIC0
+ 10.1.0.3
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.6
+ ASIC1
+ 10.1.0.7
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.0
+ ASIC0
+ 10.1.0.1
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC2
+ 10.1.0.4
+ ASIC1
+ 10.1.0.5
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.2
+ ASIC0
+ 10.1.0.3
+ 1
+ 0
+ 0
+
+
+ BGPSession
+ false
+ ASIC3
+ 10.1.0.6
+ ASIC1
+ 10.1.0.7
+ 1
+ 0
+ 0
+
+
+ false
+ ASIC1
+ 10.0.0.32
+ ARISTA01T0
+ 10.0.0.33
+ 1
+ 10
+ 3
+
+
+ ASIC1
+ FC00::41
+ ARISTA01T0
+ FC00::42
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC0
+ 10.0.0.0
+ ARISTA01T2
+ 10.0.0.1
+ 1
+ 10
+ 3
+
+
+ ASIC0
+ FC00::1
+ ARISTA01T2
+ FC00::2
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC1
+ 10.0.0.34
+ ARISTA02T0
+ 10.0.0.35
+ 1
+ 10
+ 3
+
+
+ ASIC1
+ FC00::45
+ ARISTA02T0
+ FC00::46
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC1
+ 10.0.0.36
+ ARISTA03T0
+ 10.0.0.37
+ 1
+ 10
+ 3
+
+
+ ASIC1
+ FC00::49
+ ARISTA03T0
+ FC00::4A
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC0
+ 10.0.0.4
+ ARISTA03T2
+ 10.0.0.5
+ 1
+ 10
+ 3
+
+
+ ASIC0
+ FC00::9
+ ARISTA03T2
+ FC00::A
+ 1
+ 10
+ 3
+
+
+ false
+ ASIC1
+ 10.0.0.38
+ ARISTA04T0
+ 10.0.0.39
+ 1
+ 10
+ 3
+
+
+ ASIC1
+ FC00::4D
+ ARISTA04T0
+ FC00::4E
+ 1
+ 10
+ 3
+
+
+
+
+ 65100
+ vlab-07
+
+
+ 10.0.0.33
+
+
+
+
+
+ 10.0.0.1
+
+
+
+
+
+ 10.0.0.35
+
+
+
+
+
+ 10.0.0.37
+
+
+
+
+
+ 10.0.0.5
+
+
+
+
+
+ 10.0.0.39
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC0
+
+
+ BGPPeer
+ 10.1.0.1
+
+
+
+
+
+ BGPPeer
+ 10.1.0.3
+
+
+
+
+
+ 10.0.0.1
+
+
+
+
+
+ 10.0.0.5
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC1
+
+
+ BGPPeer
+ 10.1.0.5
+
+
+
+
+
+ BGPPeer
+ 10.1.0.7
+
+
+
+
+
+ 10.0.0.33
+
+
+
+
+
+ 10.0.0.35
+
+
+
+
+
+ 10.0.0.37
+
+
+
+
+
+ 10.0.0.39
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC2
+
+
+ BGPPeer
+ 10.1.0.0
+
+
+
+
+
+ BGPPeer
+ 10.1.0.4
+
+
+
+
+
+
+
+
+ 65100
+
+ ASIC3
+
+
+ BGPPeer
+ 10.1.0.2
+
+
+
+
+
+ BGPPeer
+ 10.1.0.6
+
+
+
+
+
+
+
+
+ 64001
+ ARISTA01T0
+
+
+
+ 65200
+ ARISTA01T2
+
+
+
+ 64002
+ ARISTA02T0
+
+
+
+ 64003
+ ARISTA03T0
+
+
+
+ 65200
+ ARISTA03T2
+
+
+
+ 64004
+ ARISTA04T0
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+
+
+ HostIP
+ eth0
+
+ 10.250.0.112/24
+
+ 10.250.0.112/24
+
+
+ V6HostIP
+ eth0
+
+ fec0::ffff:afa:7/64
+
+ fec0::ffff:afa:7/64
+
+
+
+
+
+
+ vlab-07
+
+
+ PortChannel0001
+ Ethernet1/5
+
+
+
+ PortChannel0002
+ Ethernet1/1;Ethernet1/2
+
+
+
+ PortChannel0003
+ Ethernet1/6
+
+
+
+ PortChannel0004
+ Ethernet1/7
+
+
+
+ PortChannel0005
+ Ethernet1/3;Ethernet1/4
+
+
+
+ PortChannel0006
+ Ethernet1/8
+
+
+
+
+
+
+
+
+ PortChannel0001
+ 10.0.0.32/31
+
+
+
+ PortChannel0001
+ FC00::41/126
+
+
+
+ PortChannel0002
+ 10.0.0.0/31
+
+
+
+ PortChannel0002
+ FC00::1/126
+
+
+
+ PortChannel0003
+ 10.0.0.34/31
+
+
+
+ PortChannel0003
+ FC00::45/126
+
+
+
+ PortChannel0004
+ 10.0.0.36/31
+
+
+
+ PortChannel0004
+ FC00::49/126
+
+
+
+ PortChannel0005
+ 10.0.0.4/31
+
+
+
+ PortChannel0005
+ FC00::9/126
+
+
+
+ PortChannel0006
+ 10.0.0.38/31
+
+
+
+ PortChannel0006
+ FC00::4D/126
+
+
+
+
+
+ SNMP_ACL
+ SNMP
+ SNMP
+
+
+ ERSPAN
+ Everflow
+ Everflow
+
+
+ ERSPANV6
+ EverflowV6
+ EverflowV6
+
+
+ VTY_LINE
+ ssh-only
+ SSH
+
+
+ PortChannel0001;PortChannel0002;PortChannel0003;PortChannel0004;PortChannel0005;PortChannel0006
+ DataAcl
+ DataPlane
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.0/32
+
+ 8.0.0.0/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:1::32/128
+
+ FD00:1::32/128
+
+
+
+
+
+
+
+ ASIC0
+
+
+ PortChannelInterface
+ PortChannel4001
+ Eth4-ASIC0;Eth5-ASIC0
+
+
+
+ PortChannelInterface
+ PortChannel4002
+ Eth6-ASIC0;Eth7-ASIC0
+
+
+
+ PortChannel0002
+ Eth0-ASIC0;Eth1-ASIC0
+
+
+
+ PortChannel0005
+ Eth2-ASIC0;Eth3-ASIC0
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4001
+ 10.1.0.1/31
+
+
+ IPInterface
+
+ PortChannel4002
+ 10.1.0.3/31
+
+
+
+ PortChannel0002
+ 10.0.0.0/31
+
+
+
+ PortChannel0002
+ FC00::1/126
+
+
+
+ PortChannel0005
+ 10.0.0.4/31
+
+
+
+ PortChannel0005
+ FC00::9/126
+
+
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.1/32
+
+ 8.0.0.1/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:2::32/128
+
+ FD00:2::32/128
+
+
+
+
+
+
+
+ ASIC1
+
+
+ PortChannelInterface
+ PortChannel4003
+ Eth4-ASIC1;Eth5-ASIC1
+
+
+
+ PortChannelInterface
+ PortChannel4004
+ Eth6-ASIC1;Eth7-ASIC1
+
+
+
+ PortChannel0001
+ Eth0-ASIC1
+
+
+
+ PortChannel0003
+ Eth1-ASIC1
+
+
+
+ PortChannel0004
+ Eth2-ASIC1
+
+
+
+ PortChannel0006
+ Eth3-ASIC1
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4003
+ 10.1.0.5/31
+
+
+ IPInterface
+
+ PortChannel4004
+ 10.1.0.7/31
+
+
+
+ PortChannel0001
+ 10.0.0.32/31
+
+
+
+ PortChannel0001
+ FC00::41/126
+
+
+
+ PortChannel0003
+ 10.0.0.34/31
+
+
+
+ PortChannel0003
+ FC00::45/126
+
+
+
+ PortChannel0004
+ 10.0.0.36/31
+
+
+
+ PortChannel0004
+ FC00::49/126
+
+
+
+ PortChannel0006
+ 10.0.0.38/31
+
+
+
+ PortChannel0006
+ FC00::4D/126
+
+
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.4/32
+
+ 8.0.0.4/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:3::32/128
+
+ FD00:3::32/128
+
+
+
+
+
+
+
+ ASIC2
+
+
+ PortChannelInterface
+ PortChannel4009
+ Eth0-ASIC2;Eth1-ASIC2
+
+
+
+ PortChannelInterface
+ PortChannel4010
+ Eth2-ASIC2;Eth3-ASIC2
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4009
+ 10.1.0.0/31
+
+
+ IPInterface
+
+ PortChannel4010
+ 10.1.0.4/31
+
+
+
+
+
+
+
+
+
+
+
+ HostIP
+ Loopback0
+
+ 10.1.0.32/32
+
+ 10.1.0.32/32
+
+
+ HostIP1
+ Loopback0
+
+ FC00:1::32/128
+
+ FC00:1::32/128
+
+
+ LoopbackInterface
+ HostIP
+ Loopback4096
+
+ 8.0.0.5/32
+
+ 8.0.0.5/32
+
+
+ HostIP1
+ Loopback4096
+
+ FD00:4::32/128
+
+ FD00:4::32/128
+
+
+
+
+
+
+
+ ASIC3
+
+
+ PortChannelInterface
+ PortChannel4013
+ Eth0-ASIC3;Eth1-ASIC3
+
+
+
+ PortChannelInterface
+ PortChannel4014
+ Eth2-ASIC3;Eth3-ASIC3
+
+
+
+
+
+
+
+ IPInterface
+
+ PortChannel4013
+ 10.1.0.2/31
+
+
+ IPInterface
+
+ PortChannel4014
+ 10.1.0.6/31
+
+
+
+
+
+
+
+
+
+
+
+ DeviceInterfaceLink
+ ARISTA01T0
+ Ethernet1
+ vlab-07
+ Ethernet1/5
+
+
+ DeviceInterfaceLink
+ ARISTA01T2
+ Ethernet1
+ vlab-07
+ Ethernet1/1
+
+
+ DeviceInterfaceLink
+ ARISTA01T2
+ Ethernet2
+ vlab-07
+ Ethernet1/2
+
+
+ DeviceInterfaceLink
+ ARISTA02T0
+ Ethernet1
+ vlab-07
+ Ethernet1/6
+
+
+ DeviceInterfaceLink
+ ARISTA03T0
+ Ethernet1
+ vlab-07
+ Ethernet1/7
+
+
+ DeviceInterfaceLink
+ ARISTA03T2
+ Ethernet1
+ vlab-07
+ Ethernet1/3
+
+
+ DeviceInterfaceLink
+ ARISTA03T2
+ Ethernet2
+ vlab-07
+ Ethernet1/4
+
+
+ DeviceInterfaceLink
+ ARISTA04T0
+ Ethernet1
+ vlab-07
+ Ethernet1/8
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth0-ASIC2
+ true
+ ASIC0
+ Eth4-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth1-ASIC2
+ true
+ ASIC0
+ Eth5-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth0-ASIC3
+ true
+ ASIC0
+ Eth6-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth1-ASIC3
+ true
+ ASIC0
+ Eth7-ASIC0
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth2-ASIC2
+ true
+ ASIC1
+ Eth4-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC2
+ Eth3-ASIC2
+ true
+ ASIC1
+ Eth5-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth2-ASIC3
+ true
+ ASIC1
+ Eth6-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC3
+ Eth3-ASIC3
+ true
+ ASIC1
+ Eth7-ASIC1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth0-ASIC0
+ true
+ vlab-07
+ Ethernet1/1
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth1-ASIC0
+ true
+ vlab-07
+ Ethernet1/2
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth2-ASIC0
+ true
+ vlab-07
+ Ethernet1/3
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC0
+ Eth3-ASIC0
+ true
+ vlab-07
+ Ethernet1/4
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth0-ASIC1
+ true
+ vlab-07
+ Ethernet1/5
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth1-ASIC1
+ true
+ vlab-07
+ Ethernet1/6
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth2-ASIC1
+ true
+ vlab-07
+ Ethernet1/7
+ true
+
+
+ DeviceInterfaceLink
+ 40000
+ true
+ ASIC1
+ Eth3-ASIC1
+ true
+ vlab-07
+ Ethernet1/8
+ true
+
+
+
+
+ vlab-07
+ msft_four_asic_vs
+
+ 10.250.0.112
+
+
+
+ ARISTA02T0
+
+ 10.250.0.58
+
+ Arista-VM
+
+
+ ARISTA03T0
+
+ 10.250.0.59
+
+ Arista-VM
+
+
+ ARISTA03T2
+
+ 10.250.0.56
+
+ Arista-VM
+
+
+ ARISTA04T0
+
+ 10.250.0.60
+
+ Arista-VM
+
+
+ ARISTA01T2
+
+ 10.250.0.55
+
+ Arista-VM
+
+
+ ARISTA01T0
+
+ 10.250.0.57
+
+ Arista-VM
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC0
+ msft_four_asic_vs
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC1
+ msft_four_asic_vs
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC2
+ msft_four_asic_vs
+
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ ASIC3
+ msft_four_asic_vs
+
+
+
+
+
+ true
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/1
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/2
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/3
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/4
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/5
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/6
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/7
+
+ false
+ 0
+ 0
+ 40000
+
+
+ DeviceInterface
+
+ true
+ true
+ 1
+ Ethernet1/8
+
+ false
+ 0
+ 0
+ 40000
+
+
+ true
+ 0
+ msft_four_asic_vs
+
+
+
+
+
+
+ vlab-07
+
+
+ DeploymentId
+
+ 1
+
+
+ QosProfile
+
+ Profile0
+
+
+ DhcpResources
+
+ 192.0.0.1;192.0.0.2;192.0.0.3;192.0.0.4
+
+
+ NtpResources
+
+ 10.0.0.1;10.0.0.2
+
+
+ SnmpResources
+
+ 10.0.0.9
+
+
+ SyslogResources
+
+ 10.0.0.5;10.0.0.6
+
+
+ TacacsGroup
+
+ testlab
+
+
+ TacacsServer
+
+ 10.0.0.9;10.0.0.8
+
+
+ ErspanDestinationIpv4
+
+ 10.0.0.7
+
+
+
+
+ ASIC0
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC1
+
+
+ SubRole
+
+ FrontEnd
+
+
+
+
+ ASIC2
+
+
+ SubRole
+
+ BackEnd
+
+
+
+
+ ASIC3
+
+
+ SubRole
+
+ BackEnd
+
+
+
+
+
+
+ vlab-07
+ msft_four_asic_vs
+
diff --git a/ansible/module_utils/port_utils.py b/ansible/module_utils/port_utils.py
index 3a7ad7192ac..10af814a924 100644
--- a/ansible/module_utils/port_utils.py
+++ b/ansible/module_utils/port_utils.py
@@ -24,6 +24,32 @@ def get_port_alias_to_name_map(hwsku):
elif hwsku == "Force10-Z9100":
for i in range(0, 128, 4):
port_alias_to_name_map["hundredGigE1/%d" % (i / 4 + 1)] = "Ethernet%d" % i
+ elif hwsku == "DellEMC-Z9332f-M-O16C64":
+ # 100G ports
+ s100G_ports = [x for x in range(0, 96, 2)] + [x for x in range(128, 160, 2)]
+
+ # 400G ports
+ s400G_ports = [x for x in range(96, 128, 8)] + [x for x in range(160, 256, 8)]
+
+ # 10G ports
+ s10G_ports = [x for x in range(256, 258)]
+
+ for i in s100G_ports:
+ alias = "hundredGigE1/{}/{}".format(((i + 8) // 8), ((i // 2) % 4) + 1)
+ port_alias_to_name_map[alias] = "Ethernet{}".format(i)
+ for i in s400G_ports:
+ alias = "fourhundredGigE1/{}".format((i // 8) + 1)
+ port_alias_to_name_map[alias] = "Ethernet{}".format(i)
+ for i in s10G_ports:
+ alias = "tenGigE1/{}".format(33 if i == 256 else 34)
+ port_alias_to_name_map[alias] = "Ethernet{}".format(i)
+ elif hwsku == "DellEMC-Z9332f-O32":
+ for i in range(0, 256, 8):
+ alias = "fourhundredGigE1/{}".format((i // 8) + 1)
+ port_alias_to_name_map[alias] = "Ethernet{}".format(i)
+ for i in range(256, 258):
+ alias = "tenGigE1/{}".format(33 if i == 256 else 34)
+ port_alias_to_name_map[alias] = "Ethernet{}".format(i)
elif hwsku == "Arista-7050-QX32":
for i in range(1, 25):
port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4)
@@ -40,6 +66,25 @@ def get_port_alias_to_name_map(hwsku):
elif hwsku == "Arista-7060CX-32S-C32" or hwsku == "Arista-7060CX-32S-Q32" or hwsku == "Arista-7060CX-32S-C32-T1" or hwsku == "Arista-7170-32CD-C32":
for i in range(1, 33):
port_alias_to_name_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4)
+ elif hwsku == "Mellanox-SN2700-D40C8S8":
+ # 10G ports
+ s10G_ports = range(0, 4) + range(8, 12)
+
+ # 50G ports
+ s50G_ports = [x for x in range(16, 24, 2)] + [x for x in range(40, 88, 2)] + [x for x in range(104, 128, 2)]
+
+ # 100G ports
+ s100G_ports = [x for x in range(24, 40, 4)] + [x for x in range(88, 104, 4)]
+
+ for i in s10G_ports:
+ alias = "etp%d" % (i / 4 + 1) + chr(ord('a') + i % 4)
+ port_alias_to_name_map[alias] = "Ethernet%d" % i
+ for i in s50G_ports:
+ alias = "etp%d" % (i / 4 + 1) + ("a" if i % 4 == 0 else "b")
+ port_alias_to_name_map[alias] = "Ethernet%d" % i
+ for i in s100G_ports:
+ alias = "etp%d" % (i / 4 + 1)
+ port_alias_to_name_map[alias] = "Ethernet%d" % i
elif hwsku == "Mellanox-SN2700-D48C8":
# 50G ports
s50G_ports = [x for x in range(0, 24, 2)] + [x for x in range(40, 88, 2)] + [x for x in range(104, 128, 2)]
@@ -101,7 +146,7 @@ def get_port_alias_to_name_map(hwsku):
elif hwsku == "Celestica-E1031-T48S4":
for i in range(1, 53):
port_alias_to_name_map["etp%d" % i] = "Ethernet%d" % ((i - 1))
- elif hwsku == "et6448m":
+ elif hwsku == "et6448m" or hwsku == "Nokia-7215":
for i in range(0, 52):
port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i
elif hwsku == "newport":
@@ -112,4 +157,3 @@ def get_port_alias_to_name_map(hwsku):
port_alias_to_name_map["Ethernet%d" % i] = "Ethernet%d" % i
return port_alias_to_name_map
-
diff --git a/ansible/recover_server.py b/ansible/recover_server.py
index 31e502dea23..8beff60a7ac 100755
--- a/ansible/recover_server.py
+++ b/ansible/recover_server.py
@@ -20,6 +20,8 @@
import time
from tabulate import tabulate
+# Add tests path to syspath
+sys.path.append('../')
ANSIBLE_DIR = os.path.abspath(os.path.dirname(__file__))
@@ -90,6 +92,12 @@ def __init__(self, tbname, passfile, log_save_dir, tbfile=None, vmfile=None, dry
self.args.extend(('start-topo-vms', tbname, passfile))
self.tbname = tbname
+class TaskStartVMs(Task):
+ """Task start-vm"""
+
+ def __init__(self, server, passfile, log_save_dir, tbfile=None, vmfile=None, dry_run=False):
+ Task.__init__(self, server + '_start_vms', log_save_dir=log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=dry_run)
+ self.args.extend(('start-vms', server, passfile))
class TaskAddTopo(Task):
"""Task add-topo."""
@@ -138,11 +146,16 @@ def __init__(self, jobname, **kwargs):
TaskCleanupVMHosts(server, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run)
]
self.ignore_errors = False
+ elif jobname == 'start-vms':
+ server = kwargs['server']
+ self.tasks = [
+ TaskStartVMs(server, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run)
+ ]
+ self.ignore_errors = False
elif jobname == 'init_testbed':
tbname = kwargs['tbname']
inventory = kwargs['inventory']
self.tasks = [
- TaskStartTopoVMs(tbname, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run),
TaskAddTopo(tbname, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run),
TaskDeployMG(tbname, inventory, passfile, log_save_dir, tbfile=tbfile, vmfile=vmfile, dry_run=self.dry_run)
]
@@ -176,8 +189,16 @@ def _print_summary(jobs):
if jobs[0].failed_task is not None:
output.append('Server %s cleanup failed, skip recovery.' % server)
jobs = jobs[1:]
+ # start-vms output
+ if jobs[0].failed_task is None:
+ start_vms_result = 'Succeed.'
+ else:
+ start_vms_result = 'Failed.'
+ output.append('Server %s start-vms result: %s ' % (server, start_vms_result))
+ jobs = jobs[1:]
+
output.append('Server %s recovery result:' % server)
- headers = [server, 'start-topo-vms', 'add-topo', 'deploy-mg']
+ headers = [server, 'add-topo', 'deploy-mg']
table = []
for job in jobs:
line = [job.tbname, ]
@@ -239,6 +260,17 @@ def _join_all(threads):
dry_run=dry_run
) for tbname in tbnames
]
+ jobs = [
+ Job(
+ 'start-vms',
+ server=server,
+ passfile=passfile,
+ tbfile=tbfile,
+ vmfile=vmfile,
+ log_save_dir=log_save_dir_per_server,
+ dry_run=dry_run
+ )
+ ] + jobs
if not skip_cleanup:
jobs = [
Job(
diff --git a/ansible/roles/eos/templates/t0-56-po2vlan-leaf.j2 b/ansible/roles/eos/templates/t0-56-po2vlan-leaf.j2
new file mode 120000
index 00000000000..8430cb1debd
--- /dev/null
+++ b/ansible/roles/eos/templates/t0-56-po2vlan-leaf.j2
@@ -0,0 +1 @@
+t0-leaf.j2
\ No newline at end of file
diff --git a/ansible/roles/eos/templates/t0-80-leaf.j2 b/ansible/roles/eos/templates/t0-80-leaf.j2
new file mode 120000
index 00000000000..8430cb1debd
--- /dev/null
+++ b/ansible/roles/eos/templates/t0-80-leaf.j2
@@ -0,0 +1 @@
+t0-leaf.j2
\ No newline at end of file
diff --git a/ansible/roles/eos/templates/t1-8-lag-spine.j2 b/ansible/roles/eos/templates/t1-8-lag-spine.j2
new file mode 100644
index 00000000000..a673d12618c
--- /dev/null
+++ b/ansible/roles/eos/templates/t1-8-lag-spine.j2
@@ -0,0 +1,131 @@
+{% set host = configuration[hostname] %}
+{% set mgmt_ip = ansible_host %}
+{% if vm_type is defined and vm_type == "ceos" %}
+{% set mgmt_if_index = 0 %}
+{% else %}
+{% set mgmt_if_index = 1 %}
+{% endif %}
+no schedule tech-support
+!
+{% if vm_type is defined and vm_type == "ceos" %}
+agent LicenseManager shutdown
+agent PowerFuse shutdown
+agent PowerManager shutdown
+agent Thermostat shutdown
+agent LedPolicy shutdown
+agent StandbyCpld shutdown
+agent Bfd shutdown
+{% endif %}
+!
+hostname {{ hostname }}
+!
+vrf definition MGMT
+ rd 1:1
+!
+spanning-tree mode mstp
+!
+aaa root secret 0 123456
+!
+username admin privilege 15 role network-admin secret 0 123456
+!
+clock timezone UTC
+!
+lldp run
+lldp management-address Management{{ mgmt_if_index }}
+lldp management-address vrf MGMT
+!
+snmp-server community {{ snmp_rocommunity }} ro
+snmp-server vrf MGMT
+!
+ip routing
+ip routing vrf MGMT
+ipv6 unicast-routing
+!
+ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }}
+!
+interface Management {{ mgmt_if_index }}
+ description TO LAB MGMT SWITCH
+{% if vm_type is defined and vm_type == "ceos" %}
+ vrf MGMT
+{% else %}
+ vrf forwarding MGMT
+{% endif %}
+ ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }}
+ no shutdown
+!
+{% for name, iface in host['interfaces'].items() %}
+interface {{ name }}
+{% if name.startswith('Loopback') %}
+ description LOOPBACK
+{% else %}
+ no switchport
+{% endif %}
+{% if name.startswith('Port-Channel') %}
+ port-channel min-links 2
+{% endif %}
+{% if iface['lacp'] is defined %}
+ channel-group {{ iface['lacp'] }} mode active
+ lacp rate normal
+{% endif %}
+{% if iface['ipv4'] is defined %}
+ ip address {{ iface['ipv4'] }}
+{% endif %}
+{% if iface['ipv6'] is defined %}
+ ipv6 enable
+ ipv6 address {{ iface['ipv6'] }}
+ ipv6 nd ra suppress
+{% endif %}
+ no shutdown
+!
+{% endfor %}
+!
+interface {{ bp_ifname }}
+ description backplane
+ no switchport
+{% if host['bp_interface']['ipv4'] is defined %}
+ ip address {{ host['bp_interface']['ipv4'] }}
+{% endif %}
+{% if host['bp_interface']['ipv6'] is defined %}
+ ipv6 enable
+ ipv6 address {{ host['bp_interface']['ipv6'] }}
+ ipv6 nd ra suppress
+{% endif %}
+ no shutdown
+!
+router bgp {{ host['bgp']['asn'] }}
+ router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }}
+ !
+{% for asn, remote_ips in host['bgp']['peers'].items() %}
+{% for remote_ip in remote_ips %}
+ neighbor {{ remote_ip }} remote-as {{ asn }}
+ neighbor {{ remote_ip }} description {{ asn }}
+{% if remote_ip | ipv6 %}
+ address-family ipv6
+ neighbor {{ remote_ip }} activate
+ exit
+{% endif %}
+{% endfor %}
+{% endfor %}
+ neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }}
+ neighbor {{ props.nhipv4 }} description exabgp_v4
+ neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }}
+ neighbor {{ props.nhipv6 }} description exabgp_v6
+ address-family ipv6
+ neighbor {{ props.nhipv6 }} activate
+ exit
+ !
+{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %}
+{% if iface['ipv4'] is defined %}
+ network {{ iface['ipv4'] }}
+{% endif %}
+{% if iface['ipv6'] is defined %}
+ network {{ iface['ipv6'] }}
+{% endif %}
+{% endfor %}
+!
+management api http-commands
+ no protocol https
+ protocol http
+ no shutdown
+!
+end
diff --git a/ansible/roles/eos/templates/t1-8-lag-tor.j2 b/ansible/roles/eos/templates/t1-8-lag-tor.j2
new file mode 100644
index 00000000000..92d52642baf
--- /dev/null
+++ b/ansible/roles/eos/templates/t1-8-lag-tor.j2
@@ -0,0 +1,135 @@
+{% set host = configuration[hostname] %}
+{% set mgmt_ip = ansible_host %}
+{% set tornum = host['tornum'] %}
+{% if vm_type is defined and vm_type == "ceos" %}
+{% set mgmt_if_index = 0 %}
+{% else %}
+{% set mgmt_if_index = 1 %}
+{% endif %}
+no schedule tech-support
+!
+{% if vm_type is defined and vm_type == "ceos" %}
+agent LicenseManager shutdown
+agent PowerFuse shutdown
+agent PowerManager shutdown
+agent Thermostat shutdown
+agent LedPolicy shutdown
+agent StandbyCpld shutdown
+agent Bfd shutdown
+{% endif %}
+!
+hostname {{ hostname }}
+!
+vrf definition MGMT
+ rd 1:1
+!
+spanning-tree mode mstp
+!
+aaa root secret 0 123456
+!
+username admin privilege 15 role network-admin secret 0 123456
+!
+clock timezone UTC
+!
+lldp run
+lldp management-address Management{{ mgmt_if_index }}
+lldp management-address vrf MGMT
+!
+snmp-server community {{ snmp_rocommunity }} ro
+snmp-server vrf MGMT
+!
+ip routing
+ip routing vrf MGMT
+ipv6 unicast-routing
+!
+ip route vrf MGMT 0.0.0.0/0 {{ mgmt_gw }}
+!
+interface Management {{ mgmt_if_index }}
+ description TO LAB MGMT SWITCH
+ {% if vm_type is defined and vm_type == "ceos" %}
+ vrf MGMT
+{% else %}
+ vrf forwarding MGMT
+ {% endif %}
+ ip address {{ mgmt_ip }}/{{ mgmt_prefixlen }}
+ no shutdown
+!
+{% for name, iface in host['interfaces'].items() %}
+interface {{ name }}
+{% if name.startswith('Loopback') %}
+ description LOOPBACK
+{% else %}
+ no switchport
+{% endif %}
+{% if name.startswith('Port-Channel') %}
+ port-channel min-links 1
+{% endif %}
+{% if iface['ipv4'] is defined %}
+ ip address {{ iface['ipv4'] }}
+{% endif %}
+{% if iface['ipv6'] is defined %}
+ ipv6 enable
+ ipv6 address {{ iface['ipv6'] }}
+ ipv6 nd ra suppress
+{% endif %}
+{% if iface['lacp'] is defined %}
+ channel-group {{ iface['lacp'] }} mode active
+ lacp rate normal
+{% endif %}
+ no shutdown
+!
+{% endfor %}
+!
+interface {{ bp_ifname }}
+ description backplane
+ no switchport
+{% if host['bp_interface']['ipv4'] is defined %}
+ ip address {{ host['bp_interface']['ipv4'] }}
+{% endif %}
+{% if host['bp_interface']['ipv6'] is defined %}
+ ipv6 enable
+ ipv6 address {{ host['bp_interface']['ipv6'] }}
+ ipv6 nd ra suppress
+{% endif %}
+ no shutdown
+!
+router bgp {{ host['bgp']['asn'] }}
+ router-id {{ host['interfaces']['Loopback0']['ipv4'] | ipaddr('address') }}
+ !
+ graceful-restart restart-time {{ bgp_gr_timer }}
+ graceful-restart
+ !
+{% for asn, remote_ips in host['bgp']['peers'].items() %}
+{% for remote_ip in remote_ips %}
+ neighbor {{ remote_ip }} remote-as {{ asn }}
+ neighbor {{ remote_ip }} description {{ asn }}
+{% if remote_ip | ipv6 %}
+ address-family ipv6
+ neighbor {{ remote_ip }} activate
+ exit
+{% endif %}
+{% endfor %}
+{% endfor %}
+ neighbor {{ props.nhipv4 }} remote-as {{ host['bgp']['asn'] }}
+ neighbor {{ props.nhipv4 }} description exabgp_v4
+ neighbor {{ props.nhipv6 }} remote-as {{ host['bgp']['asn'] }}
+ neighbor {{ props.nhipv6 }} description exabgp_v6
+ address-family ipv6
+ neighbor {{ props.nhipv6 }} activate
+ exit
+ !
+{% for name, iface in host['interfaces'].items() if name.startswith('Loopback') %}
+{% if iface['ipv4'] is defined %}
+ network {{ iface['ipv4'] }}
+{% endif %}
+{% if iface['ipv6'] is defined %}
+ network {{ iface['ipv6'] }}
+{% endif %}
+{% endfor %}
+!
+management api http-commands
+ no protocol https
+ protocol http
+ no shutdown
+!
+end
diff --git a/ansible/roles/test/files/ptftests/advanced-reboot.py b/ansible/roles/test/files/ptftests/advanced-reboot.py
index 75aa5fe2bb3..a3a4da42a36 100644
--- a/ansible/roles/test/files/ptftests/advanced-reboot.py
+++ b/ansible/roles/test/files/ptftests/advanced-reboot.py
@@ -155,6 +155,7 @@ def __init__(self):
self.check_param('vnet', False, required=False)
self.check_param('vnet_pkts', None, required=False)
self.check_param('target_version', '', required=False)
+ self.check_param('bgp_v4_v6_time_diff', 40, required=False)
if not self.test_params['preboot_oper'] or self.test_params['preboot_oper'] == 'None':
self.test_params['preboot_oper'] = None
if not self.test_params['inboot_oper'] or self.test_params['inboot_oper'] == 'None':
@@ -871,6 +872,7 @@ def handle_post_reboot_health_check(self):
def wait_for_ssh_threads(signal):
while any(thr.is_alive() for thr, _ in self.ssh_jobs) and not signal.is_set():
+ self.log('Waiting till SSH threads stop')
time.sleep(self.TIMEOUT)
for thr, _ in self.ssh_jobs:
@@ -1147,8 +1149,10 @@ def cmd(self, cmds):
return stdout, stderr, return_code
def peer_state_check(self, ip, queue):
- ssh = Arista(ip, queue, self.test_params)
+ self.log('SSH thread for VM {} started'.format(ip))
+ ssh = Arista(ip, queue, self.test_params, log_cb=self.log)
self.fails[ip], self.info[ip], self.cli_info[ip], self.logs_info[ip] = ssh.run()
+ self.log('SSH thread for VM {} finished'.format(ip))
def wait_until_cpu_port_down(self, signal):
while not signal.is_set():
diff --git a/ansible/roles/test/files/ptftests/arista.py b/ansible/roles/test/files/ptftests/arista.py
index 53885eb62f0..dfd47555a25 100644
--- a/ansible/roles/test/files/ptftests/arista.py
+++ b/ansible/roles/test/files/ptftests/arista.py
@@ -35,9 +35,10 @@
class Arista(object):
DEBUG = False
- def __init__(self, ip, queue, test_params, login='admin', password='123456'):
+ def __init__(self, ip, queue, test_params, log_cb=None, login='admin', password='123456'):
self.ip = ip
self.queue = queue
+ self.log_cb = log_cb
self.login = login
self.password = password
self.conn = None
@@ -48,10 +49,15 @@ def __init__(self, ip, queue, test_params, login='admin', password='123456'):
self.info = set()
self.min_bgp_gr_timeout = int(test_params['min_bgp_gr_timeout'])
self.reboot_type = test_params['reboot_type']
+ self.bgp_v4_v6_time_diff = test_params['bgp_v4_v6_time_diff']
def __del__(self):
self.disconnect()
+ def log(self, msg):
+ if self.log_cb is not None:
+ self.log_cb('SSH thread VM={}: {}'.format(self.ip, msg))
+
def connect(self):
self.conn = paramiko.SSHClient()
self.conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())
@@ -117,6 +123,7 @@ def run(self):
while not (quit_enabled and v4_routing_ok and v6_routing_ok):
cmd = self.queue.get()
if cmd == 'quit':
+ self.log('quit command received')
quit_enabled = True
continue
cur_time = time.time()
@@ -159,20 +166,29 @@ def run(self):
attempts = 60
log_present = False
- for _ in range(attempts):
+ log_data = {}
+ for attempt in range(attempts):
+ self.log('Collecting logs for attempt {}'.format(attempt))
log_output = self.do_cmd("show log | begin %s" % log_first_line)
+ self.log('Log output "{}"'.format(log_output))
log_lines = log_output.split("\r\n")[1:-1]
- log_data = self.parse_logs(log_lines)
- if (self.reboot_type == 'fast-reboot' and \
- any(k.startswith('BGP') for k in log_data) and any(k.startswith('PortChannel') for k in log_data)) \
- or (self.reboot_type == 'warm-reboot' and any(k.startswith('BGP') for k in log_data)):
- log_present = True
- break
- time.sleep(1) # wait until logs are populated
+ try:
+ log_data = self.parse_logs(log_lines)
+ if (self.reboot_type == 'fast-reboot' and \
+ any(k.startswith('BGP') for k in log_data) and any(k.startswith('PortChannel') for k in log_data)) \
+ or (self.reboot_type == 'warm-reboot' and any(k.startswith('BGP') for k in log_data)):
+ log_present = True
+ break
+ time.sleep(1) # wait until logs are populated
+ except Exception as err:
+ msg = 'Exception occured when parsing logs from VM: msg={} type={}'.format(err, type(err))
+ self.log(msg)
+ self.fails.add(msg)
if not log_present:
log_data['error'] = 'Incomplete output'
+ self.log('Disconnecting from VM')
self.disconnect()
# save data for troubleshooting
@@ -186,6 +202,7 @@ def run(self):
with open("/tmp/%s.logging" % self.ip, "w") as fp:
fp.write("\n".join(log_lines))
+ self.log('Checking BGP GR peer status on VM')
self.check_gr_peer_status(data)
cli_data = {}
cli_data['lacp'] = self.check_series_status(data, "lacp", "LACP session")
@@ -193,15 +210,17 @@ def run(self):
cli_data['bgp_v6'] = self.check_series_status(data, "bgp_route_v6", "BGP v6 routes")
cli_data['po'] = self.check_change_time(samples, "po_changetime", "PortChannel interface")
- route_timeout = log_data['route_timeout']
- cli_data['route_timeout'] = route_timeout
+ if 'route_timeout' in log_data:
+ route_timeout = log_data['route_timeout']
+ cli_data['route_timeout'] = route_timeout
- # {'10.0.0.38': [(0, '4200065100)')], 'fc00::2d': [(0, '4200065100)')]}
- for nei in route_timeout.keys():
- asn = route_timeout[nei][0][-1]
- msg = 'BGP route GR timeout: neighbor %s (ASN %s' % (nei, asn)
- self.fails.add(msg)
+ # {'10.0.0.38': [(0, '4200065100)')], 'fc00::2d': [(0, '4200065100)')]}
+ for nei in route_timeout.keys():
+ asn = route_timeout[nei][0][-1]
+ msg = 'BGP route GR timeout: neighbor %s (ASN %s' % (nei, asn)
+ self.fails.add(msg)
+ self.log('Finishing run()')
return self.fails, self.info, cli_data, log_data
def extract_from_logs(self, regexp, data):
@@ -248,24 +267,29 @@ def parse_logs(self, data):
# first state is Idle, last state is Established
for events in result_bgp.values():
if len(events) > 1:
- assert(events[0][1] != 'Established')
+ first_state = events[0][1]
+ assert first_state != 'Established', 'First BGP state should not be Established, it was {}'.format(first_state)
- assert(events[-1][1] == 'Established')
+ last_state = events[-1][1]
+ assert last_state == 'Established', 'Last BGP state is not Established, it was {}'.format(last_state)
- # verify BGP establishment time between v4 and v6 peer is not more than 20s
+ # verify BGP establishment time between v4 and v6 peer is not more than self.bgp_v4_v6_time_diff
if self.reboot_type == 'warm-reboot':
estab_time = 0
for ip in result_bgp:
if estab_time > 0:
diff = abs(result_bgp[ip][-1][0] - estab_time)
- assert(diff <= 20)
+ assert diff <= self.bgp_v4_v6_time_diff, \
+ 'BGP establishement time between v4 and v6 peer is longer than {} sec, it was {}'.format(self.bgp_v4_v6_time_diff, diff)
break
estab_time = result_bgp[ip][-1][0]
# first state is down, last state is up
for events in result_if.values():
- assert(events[0][1] == 'down')
- assert(events[-1][1] == 'up')
+ first_state = events[0][1]
+ last_state = events[-1][1]
+ assert first_state == 'down', 'First PO state should be down, it was {}'.format(first_state)
+ assert last_state == 'up', 'Last PO state should be up, it was {}'.format(last_state)
neigh_ipv4 = [neig_ip for neig_ip in result_bgp.keys() if '.' in neig_ip][0]
for neig_ip in result_bgp.keys():
diff --git a/ansible/roles/test/files/ptftests/dhcp_relay_test.py b/ansible/roles/test/files/ptftests/dhcp_relay_test.py
index 1d1ca62066c..1db53a4af9e 100644
--- a/ansible/roles/test/files/ptftests/dhcp_relay_test.py
+++ b/ansible/roles/test/files/ptftests/dhcp_relay_test.py
@@ -118,6 +118,12 @@ def setUp(self):
self.client_port_index = int(self.test_params['client_port_index'])
self.client_mac = self.dataplane.get_mac(0, self.client_port_index)
+ self.switch_loopback_ip = self.test_params['switch_loopback_ip']
+
+ # 'dual' for dual tor testing
+ # 'single' for regular single tor testing
+ self.dual_tor = (self.test_params['testing_mode'] == 'dual')
+
# option82 is a byte string created by the relay agent. It contains the circuit_id and remote_id fields.
# circuit_id is stored as suboption 1 of option 82.
# It consists of the following:
@@ -139,6 +145,16 @@ def setUp(self):
self.option82 += struct.pack('BB', 2, len(remote_id_string))
self.option82 += remote_id_string
+ # In 'dual' testing mode, vlan ip is stored as suboption 5 of option 82.
+ # It consists of the following:
+ # Byte 0: Suboption number, always set to 5
+ # Byte 1: Length of suboption data in bytes, always set to 4 (ipv4 addr has 4 bytes)
+ # Bytes 2+: vlan ip addr
+ if self.dual_tor:
+ link_selection = ''.join([chr(int(byte)) for byte in self.relay_iface_ip.split('.')])
+ self.option82 += struct.pack('BB', 5, 4)
+ self.option82 += link_selection
+
# We'll assign our client the IP address 1 greater than our relay interface (i.e., gateway) IP
self.client_ip = incrementIpAddress(self.relay_iface_ip, 1)
self.client_subnet = self.test_params['relay_iface_netmask']
@@ -195,7 +211,7 @@ def create_dhcp_discover_relayed_packet(self):
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.DEFAULT_ROUTE_IP,
siaddr=self.DEFAULT_ROUTE_IP,
- giaddr=self.relay_iface_ip,
+ giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'discover'),
('relay_agent_Information', self.option82),
@@ -214,10 +230,10 @@ def create_dhcp_offer_packet(self):
eth_dst=self.relay_iface_mac,
eth_client=self.client_mac,
ip_server=self.server_ip,
- ip_dst=self.relay_iface_ip,
+ ip_dst=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
ip_offered=self.client_ip,
port_dst=self.DHCP_SERVER_PORT,
- ip_gateway=self.relay_iface_ip,
+ ip_gateway=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
netmask_client=self.client_subnet,
dhcp_lease=self.LEASE_TIME,
padding_bytes=0,
@@ -246,7 +262,7 @@ def create_dhcp_offer_relayed_packet(self):
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.client_ip,
siaddr=self.server_ip,
- giaddr=self.relay_iface_ip,
+ giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'offer'),
('server_id', self.server_ip),
@@ -300,7 +316,7 @@ def create_dhcp_request_relayed_packet(self):
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.DEFAULT_ROUTE_IP,
siaddr=self.DEFAULT_ROUTE_IP,
- giaddr=self.relay_iface_ip,
+ giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'request'),
('requested_addr', self.client_ip),
@@ -321,10 +337,10 @@ def create_dhcp_ack_packet(self):
eth_dst=self.relay_iface_mac,
eth_client=self.client_mac,
ip_server=self.server_ip,
- ip_dst=self.relay_iface_ip,
+ ip_dst=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
ip_offered=self.client_ip,
port_dst=self.DHCP_SERVER_PORT,
- ip_gateway=self.relay_iface_ip,
+ ip_gateway=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
netmask_client=self.client_subnet,
dhcp_lease=self.LEASE_TIME,
padding_bytes=0,
@@ -353,7 +369,7 @@ def create_dhcp_ack_relayed_packet(self):
ciaddr=self.DEFAULT_ROUTE_IP,
yiaddr=self.client_ip,
siaddr=self.server_ip,
- giaddr=self.relay_iface_ip,
+ giaddr=self.relay_iface_ip if not self.dual_tor else self.switch_loopback_ip,
chaddr=my_chaddr)
bootp /= scapy.DHCP(options=[('message-type', 'ack'),
('server_id', self.server_ip),
diff --git a/ansible/roles/test/files/ptftests/dualtor_sniffer.py b/ansible/roles/test/files/ptftests/dualtor_sniffer.py
new file mode 100644
index 00000000000..56285e8a676
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/dualtor_sniffer.py
@@ -0,0 +1,57 @@
+"""
+PTF test script to be used by dualtor dataplane utilities.
+This ptf test uses Scapy to sniff packets based on the filter and timeout provided.
+Captured packets are dumped into a pcap file which later can be extracted from ptf.
+"""
+
+import ptf
+from ptf.base_tests import BaseTest
+import ptf.testutils as testutils
+import scapy.all as scapyall
+import socket
+import logging
+
+from ptf import config # lgtm[py/unused-import]
+
+SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
+
+
+class Sniff(BaseTest):
+ def __init__(self):
+ BaseTest.__init__(self)
+ self.sniff_timeout = testutils.test_params_get().get("sniff_timeout")
+ self.sniff_filter = testutils.test_params_get().get("sniff_filter")
+ self.capture_pcap = testutils.test_params_get().get("capture_pcap")
+ self.sniffer_log = testutils.test_params_get().get("sniffer_logs")
+ self.port_filter_expression = testutils.test_params_get().get("port_filter_expression")
+
+
+ def setUp(self):
+ self.dataplane = ptf.dataplane_instance
+ logging.info("Setting socket configuration and filters")
+ for p in self.dataplane.ports.values():
+ port = p.get_packet_source()
+ port.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, SOCKET_RECV_BUFFER_SIZE)
+ #scapyall.attach_filter(port.socket, self.port_filter_expression)
+ logging.info("Socket configuration and filters complete")
+
+
+ def runTest(self):
+ """
+ @summary: Sniff packets based on given filters and timeout
+ """
+ logging.info("Scappy sniffer started with wait {} and filter: {}".format(self.sniff_timeout, self.sniff_filter))
+ self.packets = scapyall.sniff(timeout=self.sniff_timeout, filter=self.sniff_filter)
+ logging.info("Scappy sniffer ended")
+ self.save_sniffed_packets()
+
+
+ def save_sniffed_packets(self):
+ """
+ @summary: Dump all the captured packets into a pcap file
+ """
+ if self.packets:
+ scapyall.wrpcap(self.capture_pcap, self.packets)
+ logging.info("Pcap file dumped to {}".format(self.capture_pcap))
+ else:
+ logging.info("Pcap file is empty")
diff --git a/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py b/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
new file mode 100644
index 00000000000..6c37a912109
--- /dev/null
+++ b/ansible/roles/test/files/ptftests/ip_in_ip_tunnel_test.py
@@ -0,0 +1,200 @@
+'''
+Description: This file contains the IPinIP test for dualtor testbed
+
+Usage: Examples of how to start this script
+ /usr/bin/ptf --test-dir ptftests ip_in_ip_tunnel_test.IpinIPTunnelTest --platform-dir ptftests --qlen=2000 --platform remote -t hash_key_list=['src-port', 'dst-port', 'src-mac', 'dst-mac', 'src-ip'];server_ip='192.168.0.2';active_tor_ip='10.1.0.33';standby_tor_mac='d4:af:f7:4d:af:18';standby_tor_ip='10.1.0.32';active_tor_mac='d4:af:f7:4d:a4:44';ptf_portchannel_indices={u'PortChannel0001': [29], u'PortChannel0003': [33], u'PortChannel0002': [31], u'PortChannel0004': [35]} --relax --debug info --log-file /tmp/ip_in_ip_tunnel_test.2021-02-10-07:14:46.log --socket-recv-size 16384
+
+'''
+#---------------------------------------------------------------------
+# Global imports
+#---------------------------------------------------------------------
+import logging
+import random
+from ipaddress import ip_address
+import ptf
+from scapy.all import IP, Ether
+import ptf.packet as scapy
+from ptf.base_tests import BaseTest
+from ptf.mask import Mask
+from ptf.testutils import *
+
+# packet count for verifying traffic is forwarded via IPinIP tunnel
+PACKET_NUM = 10000
+# packet count for verifying traffic is not forwarded from standby tor to server directly
+PACKET_NUM_FOR_NEGATIVE_CHECK = 100
+
+DIFF = 0.25 # The valid range for balance check
+SRC_IP_RANGE = [unicode('8.0.0.0'), unicode('8.255.255.255')]
+TIMEOUT = 1
+
+class IpinIPTunnelTest(BaseTest):
+ '''
+ @summary: Overview of functionality
+ This script send traffic to standby ToR, and capture traffic
+ on all portchannel interfaces to check balance.
+ '''
+ def __init__(self):
+ '''
+ @summary: constructor
+ '''
+ BaseTest.__init__(self)
+ self.test_params = test_params_get()
+
+ def setUp(self):
+ self.server_ip = self.test_params['server_ip']
+ self.server_port = int(self.test_params['server_port'])
+ self.vlan_mac = self.test_params['vlan_mac']
+ self.active_tor_mac = self.test_params['active_tor_mac']
+ self.standby_tor_mac = self.test_params['standby_tor_mac']
+ self.active_tor_ip = self.test_params['active_tor_ip']
+ self.standby_tor_ip = self.test_params['standby_tor_ip']
+ self.ptf_portchannel_indices = self.test_params['ptf_portchannel_indices']
+ self.indice_to_portchannel = {}
+ for port_channel, indices in self.ptf_portchannel_indices.items():
+ for indice in indices:
+ self.indice_to_portchannel[indice] = port_channel
+
+ self.hash_key_list = self.test_params['hash_key_list']
+ self.dataplane = ptf.dataplane_instance
+
+ def runTest(self):
+ """
+ Entrypoint of test script.
+ """
+ self.send_and_verify_packets()
+
+ def random_ip(self, begin, end):
+ """
+ Generate a random IP from given ip range
+ """
+ length = int(ip_address(end)) - int(ip_address(begin))
+ return str(ip_address(begin) + random.randint(0, length))
+
+ def generate_packet_to_server(self, hash_key):
+ """
+        Generate a packet to the server. Field values in the packet are filled with random values according to hash_key.
+ """
+ base_src_mac = self.dataplane.get_mac(0, 0)
+ ip_src = self.random_ip(SRC_IP_RANGE[0], SRC_IP_RANGE[1]) if hash_key == 'src-ip' else SRC_IP_RANGE[0]
+ ip_dst = self.server_ip
+ sport = random.randint(1, 65535) if hash_key == 'src-port' else 1234
+ dport = random.randint(1, 65535) if hash_key == 'dst-port' else 80
+ src_mac = (base_src_mac[:-5] + "%02x" % random.randint(0, 255) + ":" + "%02x" % random.randint(0, 255)) if hash_key == 'src-mac' else base_src_mac
+ dst_mac = self.standby_tor_mac
+ vlan_id = random.randint(1, 4094) if hash_key == 'vlan-id' else 0
+ pkt = simple_tcp_packet(pktlen=128 if vlan_id == 0 else 132,
+ eth_dst=dst_mac,
+ eth_src=src_mac,
+ dl_vlan_enable=False if vlan_id == 0 else True,
+ vlan_vid=vlan_id,
+ vlan_pcp=0,
+ ip_src=ip_src,
+ ip_dst=ip_dst,
+ tcp_sport=sport,
+ tcp_dport=dport,
+ ip_ttl=64)
+ return pkt
+
+ def generate_expected_packet(self, inner_pkt):
+ """
+        Generate the expected IPinIP (outer IP encapsulation) packet used for verification.
+ """
+ inner_pkt = inner_pkt.copy()
+ inner_pkt.ttl = inner_pkt.ttl - 1
+ pkt = scapy.Ether(dst=self.active_tor_mac, src=self.standby_tor_mac) / \
+ scapy.IP(src=self.standby_tor_ip, dst=self.active_tor_ip) / inner_pkt[IP]
+ exp_pkt = Mask(pkt)
+ exp_pkt.set_do_not_care_scapy(scapy.Ether, 'dst')
+
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "ihl")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "len")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "id")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "flags")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "frag")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "proto")
+ exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
+
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "sport")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "seq")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "ack")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "reserved")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "dataofs")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "window")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "chksum")
+ exp_pkt.set_do_not_care_scapy(scapy.TCP, "urgptr")
+ exp_pkt.set_ignore_extra_bytes()
+
+ return exp_pkt
+
+ def generate_unexpected_packet(self, inner_pkt):
+ """
+ Generate a packet that shouldn't be observed.
+        All packets should be forwarded via the tunnel, so no packet should be observed on the server port.
+ """
+ pkt = inner_pkt.copy()
+ pkt[Ether].src = self.vlan_mac
+ # TTL of packets from active tor to server is decreased by 1
+ pkt[IP].ttl -= 1
+ unexpected_packet = Mask(pkt)
+ # Ignore dst mac
+ unexpected_packet.set_do_not_care_scapy(scapy.Ether, 'dst')
+
+ # Ignore check sum
+ unexpected_packet.set_do_not_care_scapy(scapy.IP, "chksum")
+
+ #Ignore extra bytes
+ unexpected_packet.set_ignore_extra_bytes()
+
+ return unexpected_packet
+
+ def check_balance(self, pkt_distribution, hash_key):
+ portchannel_num = len(self.ptf_portchannel_indices)
+ expect_packet_num = PACKET_NUM / portchannel_num
+ pkt_num_lo = expect_packet_num * (1.0 - DIFF)
+ pkt_num_hi = expect_packet_num * (1.0 + DIFF)
+ logging.info("hash key = {}".format(hash_key))
+ logging.info("%-10s \t %10s \t %10s \t" % ("port(s)", "exp_cnt", "act_cnt"))
+ balance = True
+ for portchannel, count in pkt_distribution.items():
+ logging.info("%-10s \t %10s \t %10s \t" % (portchannel, str(expect_packet_num), str(count)))
+ if count < pkt_num_lo or count > pkt_num_hi:
+ balance = False
+ if not balance:
+ print("Check balance failed for {}".format(hash_key))
+ assert(balance)
+
+ def send_and_verify_packets(self):
+ """
+ Send packet from ptf (T1) to standby ToR, and verify
+ """
+ dst_ports = self.indice_to_portchannel.keys()
+ # Select the first ptf indice as src port
+ src_port = dst_ports[0]
+ # Step 1. verify no packet is received from standby_tor to server
+ for i in range(0, PACKET_NUM_FOR_NEGATIVE_CHECK):
+ inner_pkt = self.generate_packet_to_server('src-ip')
+ unexpected_packet = self.generate_unexpected_packet(inner_pkt)
+ send_packet(self, src_port, inner_pkt)
+ verify_no_packet(test=self,
+ port_id=self.server_port,
+ pkt=unexpected_packet,
+ timeout=TIMEOUT)
+ # Step 2. verify packet is received from IPinIP tunnel and check balance
+ for hash_key in self.hash_key_list:
+ pkt_distribution = {}
+ for i in range(0, PACKET_NUM):
+ inner_pkt = self.generate_packet_to_server(hash_key)
+ tunnel_pkt = self.generate_expected_packet(inner_pkt)
+ send_packet(self, src_port, inner_pkt)
+ # Verify packet is received from IPinIP tunnel
+ idx, count = verify_packet_any_port(test=self,
+ pkt=tunnel_pkt,
+ ports=dst_ports,
+ device_number=0,
+ timeout=TIMEOUT)
+ pkt_distribution[self.indice_to_portchannel[dst_ports[idx]]] = pkt_distribution.get(self.indice_to_portchannel[dst_ports[idx]], 0) + 1
+ self.check_balance(pkt_distribution, hash_key)
+
+
diff --git a/ansible/roles/test/files/ptftests/mtu_test.py b/ansible/roles/test/files/ptftests/mtu_test.py
index 4645980424f..1691177279d 100644
--- a/ansible/roles/test/files/ptftests/mtu_test.py
+++ b/ansible/roles/test/files/ptftests/mtu_test.py
@@ -50,6 +50,11 @@ def setUp(self):
self.router_mac = self.test_params['router_mac']
self.testbed_type = self.test_params['testbed_type']
self.testbed_mtu = self.test_params['testbed_mtu']
+ self.src_host_ip = self.test_params.get('src_host_ip')
+ self.src_router_ip = self.test_params.get('src_router_ip')
+ self.dst_host_ip = self.test_params.get('dst_host_ip')
+ self.src_ptf_port_list = self.test_params.get('src_ptf_port_list')
+ self.dst_ptf_port_list = self.test_params.get('dst_ptf_port_list')
#---------------------------------------------------------------------
@@ -57,9 +62,9 @@ def check_icmp_mtu(self):
'''
@summary: Check ICMP/Ping to DUT works for MAX MTU.
'''
- ip_src = "10.0.0.1"
- ip_dst = "10.0.0.0"
- src_mac = self.dataplane.get_mac(0, 0)
+ ip_src = self.src_host_ip
+ ip_dst = self.src_router_ip
+ src_mac = self.dataplane.get_mac(0, self.src_ptf_port_list[0])
pktlen = self.pktlen
pkt = simple_icmp_packet(pktlen=pktlen,
@@ -82,10 +87,10 @@ def check_icmp_mtu(self):
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(scapy.ICMP, "chksum")
- src_port = 0
+ src_port = self.src_ptf_port_list[0]
send_packet(self, src_port, pkt)
logging.info("Sending packet from port " + str(src_port) + " to " + ip_dst)
- dst_port_list = [0,1]
+ dst_port_list = self.src_ptf_port_list
(matched_index, received) = verify_packet_any_port(self, masked_exp_pkt, dst_port_list)
@@ -102,9 +107,9 @@ def check_ip_mtu(self):
'''
@summary: Check unicast IP forwarding in DUT works for MAX MTU.
'''
- ip_src = "10.0.0.1"
- ip_dst = "10.0.0.63"
- src_mac = self.dataplane.get_mac(0, 0)
+ ip_src = self.src_host_ip
+ ip_dst = self.dst_host_ip
+ src_mac = self.dataplane.get_mac(0, self.src_ptf_port_list[0])
pkt = simple_ip_packet(pktlen=self.pktlen,
eth_dst=self.router_mac,
@@ -122,15 +127,11 @@ def check_ip_mtu(self):
masked_exp_pkt = Mask(exp_pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
- src_port = 0
+ src_port = self.src_ptf_port_list[0]
send_packet(self, src_port, pkt)
logging.info("Sending packet from port " + str(src_port) + " to " + ip_dst)
- dst_port_list = []
- if self.testbed_type == 't1' or self.testbed_type == 't1-lag':
- dst_port_list = [31]
- elif self.testbed_type == 't1-64-lag' or self.testbed_type == 't1-64-lag-clet':
- dst_port_list = [58]
+ dst_port_list = self.dst_ptf_port_list
(matched_index, received) = verify_packet_any_port(self, masked_exp_pkt, dst_port_list)
assert received
diff --git a/ansible/roles/test/files/ptftests/pfc_pause_test.py b/ansible/roles/test/files/ptftests/pfc_pause_test.py
index 7fdf5a156d2..ea41dfb22d3 100755
--- a/ansible/roles/test/files/ptftests/pfc_pause_test.py
+++ b/ansible/roles/test/files/ptftests/pfc_pause_test.py
@@ -1,5 +1,8 @@
+import datetime
+import glob
import ipaddress
import logging
+import os
import random
import socket
import sys
@@ -10,6 +13,7 @@
import ptf
import ptf.packet as scapy
import ptf.dataplane as dataplane
+import scapy as sc
from ptf import config
from ptf.base_tests import BaseTest
@@ -31,7 +35,7 @@ def capture_matched_packets(test, exp_packet, port, device_number=0, timeout=1):
"""
if timeout <= 0:
raise Exception("%s() requires positive timeout value." % sys._getframe().f_code.co_name)
-
+
pkts = list()
while True:
result = dp_poll(test, device_number=device_number, port_number=port, timeout=timeout)
@@ -40,9 +44,9 @@ def capture_matched_packets(test, exp_packet, port, device_number=0, timeout=1):
pkts.append(result.packet)
else:
break
-
- return pkts
-
+
+ return pkts
+
class PfcPauseTest(BaseTest):
def __init__(self):
BaseTest.__init__(self)
@@ -54,7 +58,7 @@ def setUp(self):
self.mac_src = self.test_params['mac_src']
self.mac_dst = self.test_params['mac_dst']
self.pkt_count = int(self.test_params['pkt_count'])
- self.pkt_intvl = float(self.test_params['pkt_intvl'])
+ self.pkt_intvl = float(self.test_params['pkt_intvl'])
self.port_src = int(self.test_params['port_src'])
self.port_dst = self.test_params['port_dst']
self.ip_src = self.test_params['ip_src']
@@ -65,34 +69,42 @@ def setUp(self):
self.queue_paused = self.test_params['queue_paused']
""" if DUT has MAC information """
self.dut_has_mac = self.test_params['dut_has_mac']
-
+ self.debug = self.test_params.get('debug', False)
+
def runTest(self):
pass_cnt = 0
tos = self.dscp<<2
tos_bg = self.dscp_bg<<2
-
+ if self.debug:
+ # remove previous debug files
+ files = glob.glob("/tmp/pfc_pause_{}*".format(self.dscp))
+ for file in files:
+ os.remove(file)
+ current_time = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
+ log_file = open("/tmp/pfc_pause_{}_{}".format(self.dscp, current_time), "w")
+
""" If DUT needs to learn MAC addresses """
- if not self.dut_has_mac:
+ if not self.dut_has_mac:
pkt = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
ip_src=self.ip_src,
ip_dst=self.ip_dst)
-
+
send_packet(self, self.port_src, pkt, 5)
-
+
pkt = simple_udp_packet(
eth_dst=self.mac_src,
eth_src=self.mac_dst,
ip_src=self.ip_dst,
ip_dst=self.ip_src)
-
+
send_packet(self, self.port_dst, pkt, 5)
-
+
for x in range(self.pkt_count):
sport = random.randint(0, 65535)
dport = random.randint(0, 65535)
-
+
pkt = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
@@ -102,7 +114,7 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=64)
-
+
pkt_bg = simple_udp_packet(
eth_dst=self.mac_dst,
eth_src=self.mac_src,
@@ -112,7 +124,7 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=64)
-
+
exp_pkt = simple_udp_packet(
ip_src=self.ip_src,
ip_dst=self.ip_dst,
@@ -120,30 +132,37 @@ def runTest(self):
udp_sport=sport,
udp_dport=dport,
ip_ttl=63)
-
+
masked_exp_pkt = Mask(exp_pkt)
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "tos")
-
+
send_packet(self, self.port_src, pkt, 1)
send_packet(self, self.port_src, pkt_bg, 1)
-
+
pkts = capture_matched_packets(self, masked_exp_pkt, self.port_dst)
-
+
+ if self.debug:
+ for i, pkt in enumerate(pkts):
+ dump_msg = "Iteration {}:\n Pkt num {}:\n Hex dump: {}\n\n".format(x, i, sc.utils.hexstr(pkt))
+ log_file.write(dump_msg)
+
time.sleep(self.pkt_intvl)
-
+
""" If the queue is paused, we should only receive the background packet """
if self.queue_paused:
pass_cnt += int(len(pkts) == 1 and scapy.Ether(pkts[0])[scapy.IP].tos == tos_bg)
else:
pass_cnt += int(len(pkts) == 2)
-
+
+ if self.debug:
+ log_file.close()
print "Passes: %d / %d" % (pass_cnt, self.pkt_count)
-
- def tearDown(self):
+
+ def tearDown(self):
reset_filters()
BaseTest.tearDown(self)
diff --git a/ansible/roles/test/files/ptftests/vxlan-decap.py b/ansible/roles/test/files/ptftests/vxlan-decap.py
index 21ff40fcc18..1e0c333511f 100644
--- a/ansible/roles/test/files/ptftests/vxlan-decap.py
+++ b/ansible/roles/test/files/ptftests/vxlan-decap.py
@@ -10,9 +10,10 @@
# 1. 'config_file' is a filename of a file which contains all necessary information to run the test. The file is populated by ansible. This parameter is mandatory.
# 2. 'vxlan_enabled' is a boolean parameter. When the parameter is true the test will fail if vxlan test failing. When the parameter is false the test will not fail. By default this parameter is false.
# 3. 'count' is an integer parameter. It defines how many packets are sent for each combination of ingress/egress interfaces. By default the parameter equal to 1
-# 4. 'dut_host' is the ip address of dut.
+# 4. 'dut_hostname' is the hostname of the DUT.
# 5. 'sonic_admin_user': User name to login dut
# 6. 'sonic_admin_password': Password for sonic_admin_user to login dut
+# 7. 'sonic_admin_alt_password': Alternate password for sonic_admin_user to log in to the DUT
import sys
import os.path
@@ -45,13 +46,13 @@ def count_matched_packets_helper(test, exp_packet, exp_packet_number, port, devi
raise Exception("%s() requires positive timeout value." % sys._getframe().f_code.co_name)
total_rcv_pkt_cnt = 0
- while True:
- result = dp_poll(test, device_number=device_number, port_number=port, timeout=timeout)
+ end_time = time.time() + timeout
+ while time.time() < end_time:
+ result = dp_poll(test, device_number=device_number, port_number=port, timeout=timeout, exp_pkt=exp_packet)
if isinstance(result, test.dataplane.PollSuccess):
- if ptf.dataplane.match_exp_pkt(exp_packet, result.packet):
- total_rcv_pkt_cnt += 1
- if total_rcv_pkt_cnt == exp_packet_number:
- break
+ total_rcv_pkt_cnt += 1
+ if total_rcv_pkt_cnt == exp_packet_number:
+ break
else:
break
@@ -170,9 +171,9 @@ def setUp(self):
raise Exception("required parameter 'config_file' is not present")
config = self.test_params['config_file']
- if 'dut_host' not in self.test_params:
- raise Exception("required parameter 'dut_host' is not present")
- self.dut_host = self.test_params['dut_host']
+ if 'dut_hostname' not in self.test_params:
+ raise Exception("required parameter 'dut_hostname' is not present")
+ self.dut_hostname = self.test_params['dut_hostname']
if 'sonic_admin_user' not in self.test_params:
raise Exception("required parameter 'sonic_admin_user' is not present")
@@ -182,6 +183,10 @@ def setUp(self):
raise Exception("required parameter 'sonic_admin_password' is not present")
self.sonic_admin_password = self.test_params['sonic_admin_password']
+ if 'sonic_admin_alt_password' not in self.test_params:
+ raise Exception("required parameter 'sonic_admin_alt_password' is not present")
+ self.sonic_admin_alt_password = self.test_params['sonic_admin_alt_password']
+
if not os.path.isfile(config):
raise Exception("the config file %s doesn't exist" % config)
@@ -252,9 +257,10 @@ def setUp(self):
time.sleep(10)
self.dataplane.flush()
self.dut_connection = DeviceConnection(
- self.dut_host,
+ self.dut_hostname,
self.sonic_admin_user,
- password=self.sonic_admin_password
+ password=self.sonic_admin_password,
+ alt_password=self.sonic_admin_alt_password
)
return
diff --git a/ansible/roles/test/files/ptftests/wr_arp.py b/ansible/roles/test/files/ptftests/wr_arp.py
index 531444be56e..73b379adff6 100644
--- a/ansible/roles/test/files/ptftests/wr_arp.py
+++ b/ansible/roles/test/files/ptftests/wr_arp.py
@@ -271,7 +271,11 @@ def runTest(self):
self.assertTrue(time.time() < self.stop_at, "warm-reboot took to long")
- test_port_thr.join()
+ test_port_thr.join(timeout=self.how_long)
+ if test_port_thr.isAlive():
+ self.log("Timed out waiting for warm reboot")
+ self.req_dut('quit')
+ self.assertTrue(False, "Timed out waiting for warm reboot")
uptime_after = self.req_dut('uptime')
if uptime_after.startswith('error'):
diff --git a/ansible/roles/vm_set/files/mux_simulator.md b/ansible/roles/vm_set/files/mux_simulator.md
index 0eb91c78349..3e851c70770 100644
--- a/ansible/roles/vm_set/files/mux_simulator.md
+++ b/ansible/roles/vm_set/files/mux_simulator.md
@@ -62,6 +62,28 @@ sudo systemctl restart mux-simulator
The mux-simulator service is shared by multiple dualtor test setups using the same test server. Any dualtor test setups using it is recorded in a persistent file on test server `{{ root_path }}/mux_simulator.setups.txt`. During `testbed-cli.sh add-topo`, the vm set name of current setup will be added into it. During `testbed-cli.sh remove-topo`, the vm set name of current setup will be removed from it. When the file is empty, the mux-simulator service will be stopped.
+## How to troubleshoot mux simulator
+By default, the mux-simulator service outputs its logs to `/tmp/mux_simulator.log`. The default debug level is INFO. If DEBUG level logging is needed for troubleshooting, please follow the steps below:
+
+1. Stop the mux-simulator service.
+```
+sudo systemctl stop mux-simulator
+```
+2. Find out the path of the mux_simulator.py script from the mux-simulator systemd service file.
+```
+cat /etc/systemd/system/mux-simulator.service
+```
+3. Manually run the mux_simulator.py script with `-v` option to **turn on DEBUG level logging**.
+```
+ sudo /usr/bin/env python /home/azure/veos-vm/mux_simulator.py 8080 -v
+```
+4. Try to call the mux simulator HTTP APIs and check the log file `/tmp/mux_simulator.log` for detailed logging.
+5. After troubleshooting is done, stop the manually started mux_simulator.py script (for example: Ctrl+C).
+6. Start the mux-simulator service again.
+```
+sudo systemctl start mux-simulator
+```
+
## APIs
The APIs using json for data exchange.
diff --git a/ansible/roles/vm_set/files/mux_simulator.py b/ansible/roles/vm_set/files/mux_simulator.py
index 2103b60fb03..a34608f7f18 100644
--- a/ansible/roles/vm_set/files/mux_simulator.py
+++ b/ansible/roles/vm_set/files/mux_simulator.py
@@ -15,9 +15,11 @@
import subprocess
import sys
+from logging.handlers import RotatingFileHandler
from collections import defaultdict
from flask import Flask, request, jsonify
+from flask.logging import default_handler
app = Flask(__name__)
@@ -27,12 +29,6 @@
NIC = 'nic'
-logging.basicConfig(
- filename='/tmp/mux_simulator.log',
- level=logging.INFO,
- format='%(asctime)s %(levelname)s %(message)s')
-
-
def run_cmd(cmdline):
"""Use subprocess to run a command line with shell=True
@@ -402,8 +398,13 @@ def get_mux_bridges(vm_set):
"""
bridge_prefix = 'mbr-{}-'.format(vm_set)
mux_bridges = [intf for intf in os.listdir('/sys/class/net') if intf.startswith(bridge_prefix)]
+ valid_mux_bridges = []
+ for mux_bridge in mux_bridges:
+ out = run_cmd('ovs-vsctl list-ports {}'.format(mux_bridge))
+ if len(out.splitlines()) ==3:
+ valid_mux_bridges.append(mux_bridge)
- return mux_bridges
+ return valid_mux_bridges
def get_all_mux_status(vm_set):
@@ -606,13 +607,29 @@ def mux_cable_flow_update(vm_set, port_index, action):
return jsonify({'err_msg': err_msg}), 500
+def config_logging():
+ rfh = RotatingFileHandler(
+ '/tmp/mux_simulator.log',
+ maxBytes=1024*1024,
+ backupCount=5)
+ fmt = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+ rfh.setFormatter(fmt)
+ rfh.setLevel(logging.INFO)
+ app.logger.addHandler(rfh)
+ app.logger.removeHandler(default_handler)
+
+
if __name__ == '__main__':
usage = '''
Start mux simulator server at specified port.
$ sudo python
'''
+ config_logging()
+
if '-v' in sys.argv:
app.logger.setLevel(logging.DEBUG)
+ for handler in app.logger.handlers:
+ handler.setLevel(logging.DEBUG)
if len(sys.argv) < 2:
app.logger.error(usage)
diff --git a/ansible/roles/vm_set/tasks/kickstart_vm.yml b/ansible/roles/vm_set/tasks/kickstart_vm.yml
index 2fdd6a460d7..c9cbdd556c4 100644
--- a/ansible/roles/vm_set/tasks/kickstart_vm.yml
+++ b/ansible/roles/vm_set/tasks/kickstart_vm.yml
@@ -27,6 +27,46 @@
delay: 10
ignore_errors: true
+ - name: Respin failed vm
+ include_tasks: respin_vm.yml
+ vars:
+ src_disk_image: "{{ home_path }}/{{ root_path }}/images/{{ hdd_image_filename }}"
+ disk_image: "{{ home_path }}/{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
+ cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}"
+ when: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code != 0'
+ ignore_errors: true
+
+ - name: Kickstart gives error after respin vm {{ vm_name }}
+ set_fact:
+ kickstart_failed_vms: "{{ kickstart_failed_vms + [vm_name] }}"
+ when: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code != 0'
+
+ - name: Set VM to autostart
+ command: "virsh autostart {{ vm_name }}"
+ become: yes
+ when: autostart|bool == true
+
+ when: not skip_this_vm and (vm_type | lower) == "veos"
+
+- block:
+ - set_fact:
+ num_asic: "{{ hostvars[vm_name]['num_asics'] | default(1) }}"
+
+ - name: Wait until vm {{ vm_name }} is loaded
+ sonic_kickstart: telnet_port={{ serial_port }}
+ login={{ sonic_login }}
+ passwords={{ sonic_default_passwords }}
+ hostname={{ hostname }}
+ mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}"
+ mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }}
+ new_password={{ sonic_password }}
+ num_asic={{ num_asic }}
+ register: kickstart_output
+ until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code == 0'
+ retries: 5
+ delay: 10
+ ignore_errors: true
+
- name: Destroy vm {{ vm_name }} if it hangs
virt: name={{ vm_name }}
command=destroy
@@ -44,15 +84,14 @@
ignore_errors: true
- name: Wait until vm {{ vm_name }} is loaded
- kickstart: telnet_port={{ serial_port }}
- login={{ eos_default_login }}
- password={{ eos_default_password }}
+ sonic_kickstart: telnet_port={{ serial_port }}
+ login={{ sonic_login }}
+ passwords={{ sonic_default_passwords }}
hostname={{ hostname }}
mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}"
mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }}
- new_login={{ eos_login }}
- new_password={{ eos_password }}
- new_root_password={{ eos_root_password }}
+ new_password={{ sonic_password }}
+ num_asic={{ num_asic }}
register: kickstart_output_final
until: '"kickstart_code" in kickstart_output_final and kickstart_output_final.kickstart_code == 0'
retries: 5
@@ -70,4 +109,4 @@
become: yes
when: autostart|bool == true
- when: not skip_this_vm
+ when: not skip_this_vm and (vm_type | lower) == "sonic"
\ No newline at end of file
diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml
index fe137207de2..ce392c94514 100644
--- a/ansible/roles/vm_set/tasks/main.yml
+++ b/ansible/roles/vm_set/tasks/main.yml
@@ -1,4 +1,4 @@
-# This role creates a set of VM with veos or Ubuntu for Kubernetes master
+# This role creates a set of VMs with veos or SONiC, or Ubuntu for Kubernetes master
# Input parameters for the role:
# - action: 'start', 'stop' or 'renumber' for creating, removeing, or renumbering vm set respectively
# - id: sequence number for vm set on the host.
@@ -166,19 +166,19 @@
when: home_path is not defined
- debug: msg="{{ home_path }}"
-- name: Require veos VMs by default
+- name: Require veos or SONiC VMs by default
set_fact:
- veos_vm_required: true
+ vm_required: true
-- name: veos VMs not needed when setting up Kubernetes master
+- name: veos or SONiC VMs not needed when setting up Kubernetes master
set_fact:
- veos_vm_required: false
+ vm_required: false
when:
- k8s is defined
- name: VMs not needed in case of Keysight API Server
set_fact:
- veos_vm_required: false
+ vm_required: false
when:
- ptf_imagename is defined
- ptf_imagename == "docker-keysight-api-server"
@@ -204,6 +204,17 @@
become: true
- block:
+
+ - name: Require VMs as VEOS by default
+ set_fact:
+ vm_type: "veos"
+ when: vm_type is not defined
+
+ - name: Check VM type
+ fail:
+ msg: "Cannot support this VM type {{ vm_type }}"
+ when: vm_type not in supported_vm_types
+
- name: Ensure {{ root_path }} exists
file: path={{ root_path }} state=directory
@@ -252,7 +263,7 @@
include_tasks: renumber_topo.yml
when: action == 'renumber_topo'
- when: veos_vm_required is defined and veos_vm_required == True
+ when: vm_required is defined and vm_required == True
- name: Add topology
include_tasks: add_topo.yml
diff --git a/ansible/roles/vm_set/tasks/respin_vm.yml b/ansible/roles/vm_set/tasks/respin_vm.yml
new file mode 100644
index 00000000000..b70b6b6453a
--- /dev/null
+++ b/ansible/roles/vm_set/tasks/respin_vm.yml
@@ -0,0 +1,52 @@
+---
+# This playbook will respin a specific vm
+
+- name: Destroy VM {{ vm_name }}
+ virt: name={{ vm_name }}
+ state=destroyed
+ uri=qemu:///system
+ become: yes
+ ignore_errors: true
+
+- name: Undefine VM {{ vm_name }}
+ virt: name={{ vm_name }}
+ command=undefine
+ uri=qemu:///system
+ become: yes
+ ignore_errors: true
+
+- name: Remove arista disk image for {{ vm_name }}
+ file: path={{ disk_image }} state=absent
+
+- name: Copy arista disk image for {{ vm_name }}
+ copy: src={{ src_disk_image }} dest={{ disk_image }} remote_src=True
+
+- name: Define vm {{ vm_name }}
+ virt: name={{ vm_name }}
+ command=define
+ xml="{{ lookup('template', 'templates/arista.xml.j2') }}"
+ uri=qemu:///system
+ become: yes
+
+- name: Start vm {{ vm_name }}
+ virt: name={{ vm_name }}
+ state=running
+ uri=qemu:///system
+ become: yes
+ ignore_errors: true
+
+- name: Wait until vm {{ vm_name }} is loaded
+ kickstart: telnet_port={{ serial_port }}
+ login={{ eos_default_login }}
+ password={{ eos_default_password }}
+ hostname={{ hostname }}
+ mgmt_ip="{{ mgmt_ip_address }}/{{ mgmt_prefixlen }}"
+ mgmt_gw={{ vm_mgmt_gw | default(mgmt_gw) }}
+ new_login={{ eos_login }}
+ new_password={{ eos_password }}
+ new_root_password={{ eos_root_password }}
+ register: kickstart_output
+ until: '"kickstart_code" in kickstart_output and kickstart_output.kickstart_code == 0'
+ retries: 5
+ delay: 10
+ ignore_errors: true
diff --git a/ansible/roles/vm_set/tasks/start.yml b/ansible/roles/vm_set/tasks/start.yml
index 46ef53932bc..00d2e58f1c2 100644
--- a/ansible/roles/vm_set/tasks/start.yml
+++ b/ansible/roles/vm_set/tasks/start.yml
@@ -1,3 +1,8 @@
+- name: Require VMs as VEOS by default
+ set_fact:
+ vm_type: "veos"
+ when: vm_type is not defined
+
- name: Load topo variables
include_vars: "vars/topo_{{ topo }}.yml"
when: topo is defined
@@ -12,29 +17,51 @@
- "{{ root_path }}/images"
- "{{ root_path }}/disks"
-- name: Check hdd image
- stat: path={{ root_path }}/images/{{ hdd_image_filename }}
- register: hdd_stat
+- block:
+
+ - name: Check hdd image
+ stat: path={{ root_path }}/images/{{ hdd_image_filename }}
+ register: hdd_stat
+
+ - name: Fail if there are no hdd image and skip image downloading is active
+ fail: msg="Please put {{ hdd_image_filename }} to {{ root_path }}/images"
+ when: not hdd_stat.stat.exists and skip_image_downloading
+
+ - name: Download hdd image
+ get_url: url="{{ vm_images_url }}/{{ hdd_image_filename }}?{{ vmimage_saskey }}" dest="{{ root_path }}/images/{{ hdd_image_filename }}"
+ when: not hdd_stat.stat.exists and not skip_image_downloading
-- name: Fail if there are no hdd image and skip image downloading is active
- fail: msg="Please put {{ hdd_image_filename }} to {{ root_path }}/images"
- when: not hdd_stat.stat.exists and skip_image_downloading
+ - name: Check cd image
+ stat: path={{ root_path }}/images/{{ cd_image_filename }}
+ register: cd_stat
+
+ - name: Fail if there are no cd image and skip image downloading is active
+ fail: msg="Please put {{ cd_image_filename }} to {{ root_path }}/images"
+ when: not cd_stat.stat.exists and skip_image_downloading
+
+ - name: Download cd image
+ get_url: url="{{ vm_images_url }}/{{ cd_image_filename }}?{{ cdimage_saskey }}" dest="{{ root_path }}/images/{{ cd_image_filename }}"
+ when: not cd_stat.stat.exists and not skip_image_downloading
+
+ - set_fact:
+ src_image_name: "{{ hdd_image_filename }}"
+
+ when: (vm_type | lower) == "veos"
+
+- block:
-- name: Download hdd image
- get_url: url="{{ vm_images_url }}/{{ hdd_image_filename }}?{{ vmimage_saskey }}" dest="{{ root_path }}/images/{{ hdd_image_filename }}"
- when: not hdd_stat.stat.exists and not skip_image_downloading
+ - name: Check SONiC image
+ stat: path={{ root_path }}/images/{{ sonic_image_filename }}
+ register: img_stat
-- name: Check cd image
- stat: path={{ root_path }}/images/{{ cd_image_filename }}
- register: cd_stat
+ - name: Fail if there are no SONiC image and skip image downloading is active
+ fail: msg="Please put {{ sonic_image_filename }} to {{ root_path }}/images"
+ when: not img_stat.stat.exists
-- name: Fail if there are no cd image and skip image downloading is active
- fail: msg="Please put {{ cd_image_filename }} to {{ root_path }}/images"
- when: not cd_stat.stat.exists and skip_image_downloading
+ - set_fact:
+ src_image_name: "{{ sonic_image_filename }}"
-- name: Download cd image
- get_url: url="{{ vm_images_url }}/{{ cd_image_filename }}?{{ cdimage_saskey }}" dest="{{ root_path }}/images/{{ cd_image_filename }}"
- when: not cd_stat.stat.exists and not skip_image_downloading
+ when: (vm_type | lower) == "sonic"
- name: Create VMs network
become: yes
@@ -66,8 +93,8 @@
hostname: "{{ vm_name }}"
mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}"
serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}"
- src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}"
- disk_image: "{{ home_path }}/{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
+ src_disk_image: "{{ root_path }}/images/{{ src_image_name }}"
+ disk_image_dir: "{{ home_path }}/{{ root_path }}/disks"
cdrom_image: "{{ home_path }}/{{ root_path }}/images/{{ cd_image_filename }}"
mgmt_tap: "{{ vm_name }}-m"
backplane_tap: "{{ vm_name }}-back"
@@ -83,8 +110,8 @@
hostname: "{{ vm_name }}"
mgmt_ip_address: "{{ hostvars[vm_name]['ansible_host'] }}"
serial_port: "{{ vm_console_base|int + vm_name[4:]|int }}"
- src_disk_image: "{{ root_path }}/images/{{ hdd_image_filename }}"
- disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
+ src_disk_image: "{{ root_path }}/images/{{ src_image_name }}"
+ disk_image_dir: "{{ root_path }}/disks"
cdrom_image: "{{ root_path }}/images/{{ cd_image_filename }}"
mgmt_tap: "{{ vm_name }}-m"
backplane_tap: "{{ vm_name }}-back"
diff --git a/ansible/roles/vm_set/tasks/start_vm.yml b/ansible/roles/vm_set/tasks/start_vm.yml
index 2c85a13b17c..e0fd9591710 100644
--- a/ansible/roles/vm_set/tasks/start_vm.yml
+++ b/ansible/roles/vm_set/tasks/start_vm.yml
@@ -11,12 +11,25 @@
# After respining individual VMs, the affected topology needs to
# be removed and deployed again.
+- set_fact:
+ disk_image_name: "{{ vm_type }}_{{ vm_name }}_hdd.vmdk"
+ vm_xml_template: "arista.xml.j2"
+ when: (vm_type | lower) == "veos"
+
+- set_fact:
+ disk_image_name: "{{ vm_type }}_{{ vm_name }}.img"
+ vm_xml_template: "sonic_vm.xml.j2"
+ when: (vm_type | lower) == "sonic"
+
+- set_fact:
+ disk_image: "{{ disk_image_dir }}/{{ disk_image_name }}"
+
- set_fact:
respin_vms: []
when: respin_vms is not defined
- name: Device debug output
- debug: msg="hostname = {{ hostname }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}"
+ debug: msg="hostname = {{ hostname }} vm_type = {{ vm_type }} serial port = {{ serial_port }} ip = {{ mgmt_ip_address }}"
- name: Check destination file existance
stat: path={{ disk_image }}
@@ -29,7 +42,7 @@
- name: Define vm {{ vm_name }}
virt: name={{ vm_name }}
command=define
- xml="{{ lookup('template', 'templates/arista.xml.j2') }}"
+ xml="{{ lookup('template', 'templates/{{ vm_xml_template }}') }}"
uri=qemu:///system
when: vm_name not in vm_list_defined.list_vms
become: yes
diff --git a/ansible/roles/vm_set/tasks/stop.yml b/ansible/roles/vm_set/tasks/stop.yml
index 515ad485bf2..2118d16dc24 100644
--- a/ansible/roles/vm_set/tasks/stop.yml
+++ b/ansible/roles/vm_set/tasks/stop.yml
@@ -10,7 +10,7 @@
include_tasks: stop_vm.yml
vars:
vm_name: "{{ item }}"
- disk_image: "{{ root_path }}/disks/{{ vm_name }}_hdd.vmdk"
+ disk_image_dir: "{{ root_path }}/disks"
with_items: "{{ VM_hosts }}"
- name: Destroy VMs network
diff --git a/ansible/roles/vm_set/tasks/stop_vm.yml b/ansible/roles/vm_set/tasks/stop_vm.yml
index 4552d4af7ad..509da0b65b4 100644
--- a/ansible/roles/vm_set/tasks/stop_vm.yml
+++ b/ansible/roles/vm_set/tasks/stop_vm.yml
@@ -1,3 +1,14 @@
+- set_fact:
+ disk_image_name: "{{ vm_type }}_{{ vm_name }}_hdd.vmdk"
+ when: (vm_type | lower) == "veos"
+
+- set_fact:
+ disk_image_name: "{{ vm_type }}_{{ vm_name }}.img"
+ when: (vm_type | lower) == "sonic"
+
+- set_fact:
+ disk_image: "{{ disk_image_dir }}/{{ disk_image_name }}"
+
- name: Destroy VM {{ vm_name }}
virt: name={{ vm_name }}
state=destroyed
@@ -12,6 +23,6 @@
when: vm_name in vm_list_defined.list_vms
become: yes
-- name: Remove arista disk image for {{ vm_name }}
+- name: Remove {{ vm_type }} disk image for {{ vm_name }}
file: path={{ disk_image }} state=absent
diff --git a/ansible/roles/vm_set/templates/sonic.xml.j2 b/ansible/roles/vm_set/templates/sonic.xml.j2
index 54a97791aaf..adc1f333e0a 100644
--- a/ansible/roles/vm_set/templates/sonic.xml.j2
+++ b/ansible/roles/vm_set/templates/sonic.xml.j2
@@ -1,8 +1,11 @@
{{ dut_name }}
-{% if hwsku == 'msft_multi_asic_vs' %}
+{% if hwsku == 'msft_four_asic_vs' %}
8
- 8
+ 10
+{% elif hwsku == 'msft_multi_asic_vs' %}
+ 8
+ 16
{% else %}
30720003072000
diff --git a/ansible/roles/vm_set/templates/sonic_vm.xml.j2 b/ansible/roles/vm_set/templates/sonic_vm.xml.j2
new file mode 100644
index 00000000000..4a92647475e
--- /dev/null
+++ b/ansible/roles/vm_set/templates/sonic_vm.xml.j2
@@ -0,0 +1,54 @@
+
+ {{ vm_name }}
+ 2097152
+ 2097152
+ 2
+
+ /machine
+
+
+ hvm
+
+
+
+
+
+
+
+ destroy
+ restart
+ restart
+
+ /usr/bin/qemu-system-x86_64
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+{% for fp_num in range(0, max_fp_num) %}
+
+
+
+
+
+
+{% endfor %}
+
+
+
+
+
+
+
+
+
diff --git a/ansible/templates/minigraph_cpg.j2 b/ansible/templates/minigraph_cpg.j2
index 8e556071873..1f49ecb28b9 100644
--- a/ansible/templates/minigraph_cpg.j2
+++ b/ansible/templates/minigraph_cpg.j2
@@ -14,6 +14,18 @@
103
+{% if vm_asic_ifnames is defined %}
+
+ false
+ {{ vm_asic_ifnames[vm][0].split('-')[1] }}
+ {{ vm_topo_config['vm'][vm]['bgp_ipv4'][dut_index|int] }}
+ {{ vm }}
+ {{ vm_topo_config['vm'][vm]['peer_ipv4'][dut_index|int] }}
+ 1
+ 10
+ 3
+
+{% endif %}
{% endif %}
{% if vm_topo_config['vm'][vm]['peer_ipv6'][dut_index|int] %}
@@ -25,7 +37,45 @@
103
+{% if vm_asic_ifnames is defined %}
+
+ {{ vm_asic_ifnames[vm][0].split('-')[1] }}
+ {{ vm_topo_config['vm'][vm]['bgp_ipv6'][dut_index|int] }}
+ {{ vm }}
+ {{ vm_topo_config['vm'][vm]['peer_ipv6'][dut_index|int] }}
+ 1
+ 10
+ 3
+
+{% endif %}
+{% endif %}
+{% endfor %}
+{% for asic in asic_topo_config %}
+{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %}
+{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] %}
+
+ false
+ {{ asic }}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }}
+ {{ neigh_asic }}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] }}
+ 1
+ 0
+ 0
+
+{% endif %}
+{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv6'][0] %}
+
+ {{ asic }}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }}
+ {{ neigh_asic }}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv6'][0] }}
+ 1
+ 0
+ 0
+
{% endif %}
+{% endfor %}
{% endfor %}
@@ -77,6 +127,35 @@
{% endif %}
{% endfor %}
{% endif %}
+{% for asic in asic_topo_config %}
+
+ {{ vm_topo_config['dut_asn'] }}
+ {{ asic }}
+
+{% for index in range( vms_number) %}
+{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
+
+ {{ vm_topo_config['vm'][vms[index]]['peer_ipv4'][dut_index|int] }}
+
+
+
+
+{% endif %}
+{% endfor %}
+{% for neigh_asic in asic_topo_config %}
+{% if neigh_asic in asic_topo_config[asic]['neigh_asic'] and asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] %}
+
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['peer_ipv4'][0] }}
+
+
+
+
+{% endif %}
+{% endfor %}
+
+
+
+{% endfor %}
diff --git a/ansible/templates/minigraph_dpg.j2 b/ansible/templates/minigraph_dpg.j2
index 9b6d1db4741..68f4a204498 100644
--- a/ansible/templates/minigraph_dpg.j2
+++ b/ansible/templates/minigraph_dpg.j2
@@ -76,6 +76,16 @@
{% endif %}
{% endfor %}
+{% if 'tor' in vm_topo_config['dut_type'] | lower %}
+{% for portchannel, params in portchannel_config.items() %}
+{% set port_channel_intf=';'.join(params['intfs'] | map('extract', port_alias)) %}
+
+ {{ portchannel }}
+ {{ port_channel_intf }}
+
+
+{% endfor %}
+{% endif %}
{% if tunnel_configs | length > 0 %}
@@ -89,7 +99,7 @@
{% for vlan, vlan_param in vlan_configs.items() %}
{{ vlan }}
-{% set vlan_intf_str=';'.join(vlan_param['intfs']) %}
+{% set vlan_intf_str=';'.join(vlan_param['intfs'] + vlan_param['portchannels']) %}
{{ vlan_intf_str }}False0.0.0.0/0
@@ -200,5 +210,6 @@
+{% include 'minigraph_dpg_asic.j2' %}
diff --git a/ansible/templates/minigraph_dpg_asic.j2 b/ansible/templates/minigraph_dpg_asic.j2
new file mode 100644
index 00000000000..cef693288d2
--- /dev/null
+++ b/ansible/templates/minigraph_dpg_asic.j2
@@ -0,0 +1,208 @@
+{% macro port_channel_id(asic_idx, neigh_asic_idx) -%}
+{{ ((4000 + asic_idx + (10*neigh_asic_idx))|string) }}
+{%- endmacro -%}
+{% for asic in asic_topo_config %}
+{% set asic_index = asic.split('ASIC')[1]|int %}
+
+
+
+
+ HostIP
+ Loopback0
+
+ {{ lp_ipv4 }}
+
+ {{ lp_ipv4 }}
+
+
+ HostIP1
+ Loopback0
+
+ {{ lp_ipv6 }}
+
+ {{ lp_ipv6 }}
+
+{% for lo4096 in asic_topo_config[asic]['Loopback4096'] %}
+
+ HostIP1
+ Loopback4096
+
+ {{ lo4096 }}
+
+ {{ lo4096 }}
+
+{% endfor %}
+
+
+
+ HostIP
+ eth0
+
+ {{ ansible_host }}/{{ mgmt_subnet_mask_length }}
+
+ {{ ansible_host }}/{{ mgmt_subnet_mask_length }}
+
+
+ V6HostIP
+ eth0
+
+ {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64
+
+ {{ ansible_hostv6 if ansible_hostv6 is defined else 'FC00:2::32' }}/64
+
+
+
+
+
+
+ {{ asic }}
+
+{% for index in range(vms_number) %}
+{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
+{% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int]|lower %}
+{% set port_channel_intf=';'.join(vm_asic_ifnames[vms[index]]) %}
+
+ PortChannel{{ ((index+1)|string).zfill(4) }}
+ {{ port_channel_intf }}
+
+
+{% endif %}
+{% endif %}
+{% endfor %}
+{% for neigh_asic in asic_topo_config %}
+{%- set pc_intfs = [] -%}
+{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%}
+{%- for intf in asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0] %}
+{{- pc_intfs.append(intf) }}
+{%- endfor -%}
+{%- set port_channel_intf=pc_intfs|join(';') -%}
+{% set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %}
+
+ PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }}
+ {{ port_channel_intf }}
+
+
+{% endif %}
+{% endfor %}
+
+
+
+
+{% for index in range(vms_number) %}
+{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
+{% if vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int] is not none %}
+
+
+{% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int]|lower %}
+ PortChannel{{ ((index+1) |string).zfill(4) }}
+{% else %}
+ {{ front_panel_asic_ifnames[vm_topo_config['vm'][vms[index]]['interface_indexes'][dut_index|int][0]] }}
+{% endif %}
+ {{ vm_topo_config['vm'][vms[index]]['bgp_ipv4'][dut_index|int] }}/{{ vm_topo_config['vm'][vms[index]]['ipv4mask'][dut_index|int] }}
+
+
+
+{% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][dut_index|int]|lower %}
+ PortChannel{{ ((index+1) |string).zfill(4) }}
+{% else %}
+ {{ front_panel_asic_ifnames[vm_topo_config['vm'][vms[index]]['interface_indexes'][dut_index|int][0]] }}
+{% endif %}
+ {{ vm_topo_config['vm'][vms[index]]['bgp_ipv6'][dut_index|int] }}/{{ vm_topo_config['vm'][vms[index]]['ipv6mask'][dut_index|int] }}
+
+{% endif %}
+{% endif %}
+{% endfor %}
+{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %}
+
+
+{%- if 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower %}
+{%- set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %}
+ PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }}
+{% else %}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] }}
+{% endif %}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv4'][0] }}/{{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['ipv4mask'][0] }}
+
+
+
+{%- if 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower %}
+{%- set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %}
+ PortChannel{{ port_channel_id(asic_index, neigh_asic_index).zfill(4) }}
+{% else %}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] }}
+{% endif %}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['bgp_ipv6'][0] }}/{{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['ipv6mask'][0] }}
+
+{% endfor %}
+
+
+
+
+ SNMP_ACL
+ SNMP
+ SNMP
+
+
+ ERSPAN
+ Everflow
+ Everflow
+
+
+ ERSPANV6
+ EverflowV6
+ EverflowV6
+
+
+ VTY_LINE
+ ssh-only
+ SSH
+
+
+
+{%- set acl_intfs = [] -%}
+{%- for index in range(vms_number) %}
+{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
+{% if 'port-channel' in vm_topo_config['vm'][vms[index]]['ip_intf'][0]|lower %}
+{% set a_intf = 'PortChannel' + ((index+1) |string).zfill(4) %}
+{{- acl_intfs.append(a_intf) -}}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% for neigh_asic in asic_topo_config %}
+{% set neigh_asic_index = neigh_asic.split('ASIC')[1]|int %}
+{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%}
+{% set a_intf = 'PortChannel' + port_channel_id(asic_index, neigh_asic_index).zfill(4) %}
+{{- acl_intfs.append(a_intf) -}}
+{% endif %}
+{% endfor %}
+
+{%- for index in range(vms_number) -%}
+{% if vm_asic_ifnames[vms[index]][0].split('-')[1] == asic %}
+{% if 'port-channel' not in vm_topo_config['vm'][vms[index]]['ip_intf'][0]|lower %}
+{% if vm_topo_config['vm'][vms[index]]['intfs'][dut_index|int]|length %}
+{% set a_intf = front_panel_asic_ifnames[vm_topo_config['vm'][vms[index]]['interface_indexes'][dut_index|int][0]] %}
+{{- acl_intfs.append(a_intf) -}}
+{% endif %}
+{% endif %}
+{% endif %}
+{% endfor -%}
+{%- for neigh_asic in asic_topo_config -%}
+{%- if neigh_asic in asic_topo_config[asic]['neigh_asic'] and 'port-channel' not in asic_topo_config[asic]['neigh_asic'][neigh_asic]['ip_intf'][0]|lower -%}
+{% if asic_topo_config[asic]['neigh_asic'][neigh_asic]['intfs'][0]|length %}
+{% set a_intf = asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][0] %}
+{{- acl_intfs.append(a_intf) -}}
+{% endif %}
+{% endif %}
+{% endfor %}
+
+{{- acl_intfs|join(';') -}}
+
+ DataAcl
+ DataPlane
+
+
+
+
+
+{% endfor %}
+
diff --git a/ansible/templates/minigraph_meta.j2 b/ansible/templates/minigraph_meta.j2
index 30a8c875cf6..a44ce45a16d 100644
--- a/ansible/templates/minigraph_meta.j2
+++ b/ansible/templates/minigraph_meta.j2
@@ -93,6 +93,18 @@
{% endif %}
+{% for asic in asic_topo_config %}
+
+ {{ asic }}
+
+
+ SubRole
+
+ {{ asic_topo_config[asic]['asic_type'] }}
+
+
+
+{% endfor %}
diff --git a/ansible/templates/minigraph_png.j2 b/ansible/templates/minigraph_png.j2
index 451638b50ad..128e8cae204 100644
--- a/ansible/templates/minigraph_png.j2
+++ b/ansible/templates/minigraph_png.j2
@@ -36,6 +36,36 @@
{% endfor %}
{% endif %}
{% endif %}
+{% for asic in asic_topo_config %}
+{% for neigh_asic in asic_topo_config[asic]['neigh_asic'] %}
+{% for intf in asic_topo_config[asic]['neigh_asic'][neigh_asic]['intfs'][0] | sort %}
+
+ DeviceInterfaceLink
+ 40000
+ true
+ {{ neigh_asic }}
+ {{ intf }}
+ true
+ {{ asic }}
+ {{ asic_topo_config[asic]['neigh_asic'][neigh_asic]['asic_intfs'][0][loop.index-1] }}
+ true
+
+{% endfor %}
+{% endfor %}
+{% endfor %}
+{% for asic_intf in front_panel_asic_ifnames %}
+
+ DeviceInterfaceLink
+ 40000
+ true
+ {{ asic_intf.split('-')[1] }}
+ {{ asic_intf }}
+ true
+ {{ inventory_hostname }}
+ {{ port_alias[loop.index - 1] }}
+ true
+
+{% endfor %}
@@ -135,6 +165,33 @@
{% endif %}
{% endfor %}
{% endif %}
+{% for asic in asic_topo_config %}
+
+ Asic
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+
+
+
+
+
+
+
+ 0.0.0.0/0
+
+
+ ::/0
+
+
+ {{ asic }}
+ Broadcom-Trident2
+
+{% endfor %}
diff --git a/ansible/testbed-cli.sh b/ansible/testbed-cli.sh
index b19c94ab331..fdf80afb22d 100755
--- a/ansible/testbed-cli.sh
+++ b/ansible/testbed-cli.sh
@@ -12,13 +12,14 @@ function usage
echo " $0 [options] refresh-dut "
echo " $0 [options] (connect-vms | disconnect-vms) "
echo " $0 [options] config-vm "
+ echo " $0 [options] announce-routes "
echo " $0 [options] (gen-mg | deploy-mg | test-mg) "
echo " $0 [options] (create-master | destroy-master) "
echo
echo "Options:"
echo " -t : testbed CSV file name (default: 'testbed.csv')"
echo " -m : virtual machine file name (default: 'veos')"
- echo " -k : vm type (veos|ceos) (default: 'veos')"
+ echo " -k : vm type (veos|ceos|sonic) (default: 'veos')"
echo " -n : vm num (default: 0)"
echo " -s : master set identifier on specified (default: 1)"
echo " -d : sonic vm directory (default: $HOME/sonic-vm)"
@@ -42,6 +43,7 @@ function usage
echo "To start VMs for specified topology on server: $0 start-topo-vms 'topo-name' ~/.password"
echo "To stop all VMs on a server: $0 stop-vms 'server-name' ~/.password"
echo "To stop VMs for specified topology on server: $0 stop-topo-vms 'topo-name' ~/.password"
+ echo "To cleanup *all* vms and docker: $0 cleanup-vmhost 'server-name' ~/.password"
echo "To deploy a topology on a server: $0 add-topo 'topo-name' ~/.password"
echo " Optional argument for add-topo:"
echo " -e ptf_imagetag= # Use PTF image with specified tag for creating PTF container"
@@ -50,6 +52,7 @@ function usage
echo "To connect a topology: $0 connect-topo 'topo-name' ~/.password"
echo "To refresh DUT in a topology: $0 refresh-dut 'topo-name' ~/.password"
echo "To configure a VM on a server: $0 config-vm 'topo-name' 'vm-name' ~/.password"
+ echo "To announce routes to DUT: $0 announce-routes 'topo-name' ~/.password"
echo "To generate minigraph for DUT in a topology: $0 gen-mg 'topo-name' 'inventory' ~/.password"
echo "To deploy minigraph to DUT in a topology: $0 deploy-mg 'topo-name' 'inventory' ~/.password"
echo " gen-mg, deploy-mg, test-mg supports enabling/disabling data ACL with parameter"
@@ -97,6 +100,7 @@ function read_csv
vm_base=${line_arr[8]}
dut=${line_arr[9]//;/,}
duts=${dut//[\[\] ]/}
+ inventory=${line_arr[10]}
}
function read_yaml
@@ -137,6 +141,7 @@ function read_yaml
vm_base=${line_arr[8]}
dut=${line_arr[9]}
duts=$(python -c "from __future__ import print_function; print(','.join(eval(\"$dut\")))")
+ inventory=${line_arr[10]}
}
function read_file
@@ -160,7 +165,7 @@ function start_vms
shift
echo "Starting VMs on server '${server}'"
- ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" testbed_start_VMs.yml \
+ ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" -e vm_type="$vm_type" testbed_start_VMs.yml \
--vault-password-file="${passwd}" -l "${server}" $@
}
@@ -172,7 +177,7 @@ function stop_vms
shift
echo "Stopping VMs on server '${server}'"
- ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@
+ ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e vm_type="$vm_type" testbed_stop_VMs.yml --vault-password-file="${passwd}" -l "${server}" $@
}
function start_topo_vms
@@ -340,6 +345,23 @@ function disconnect_vms
echo Done
}
+function announce_routes
+{
+ topology=$1
+ passfile=$2
+ shift
+ shift
+
+ echo "Announce routes '$topology'"
+
+ read_file $topology
+
+ ANSIBLE_SCP_IF_SSH=y ansible-playbook -i "$inventory" testbed_announce_routes.yml --vault-password-file="$passfile" \
+ -l "$server" -e vm_set_name="$vm_set_name" -e topo="$topo" -e ptf_ip="$ptf_ip" $@
+
+ echo done
+}
+
function generate_minigraph
{
topology=$1
@@ -442,6 +464,18 @@ function stop_k8s_vms
ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile testbed_stop_k8s_VMs.yml --vault-password-file="${passwd}" -l "${server}" -e k8s="true" $@
}
+function cleanup_vmhost
+{
+ server=$1
+ passwd=$2
+ shift
+ shift
+ echo "Cleaning vm_host server '${server}'"
+
+ ANSIBLE_SCP_IF_SSH=y ansible-playbook -i $vmfile -e VM_num="$vm_num" testbed_cleanup.yml \
+ --vault-password-file="${passwd}" -l "${server}" $@
+}
+
vmfile=veos
tbfile=testbed.csv
vm_type=veos
@@ -508,12 +542,16 @@ case "${subcmd}" in
;;
config-vm) config_vm $@
;;
+ announce-routes) announce_routes $@
+ ;;
gen-mg) generate_minigraph $@
;;
deploy-mg) deploy_minigraph $@
;;
test-mg) test_minigraph $@
;;
+ cleanup-vmhost) cleanup_vmhost $@
+ ;;
create-master) start_k8s_vms $@
setup_k8s_vms $@
;;
diff --git a/ansible/testbed.csv b/ansible/testbed.csv
index 3d0c7faf50e..53723f7746d 100644
--- a/ansible/testbed.csv
+++ b/ansible/testbed.csv
@@ -1,13 +1,13 @@
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-ptf1-m,ptf1,ptf32,docker-ptf,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,Test ptf Mellanox
-ptf2-b,ptf2,ptf64,docker-ptf,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,Test ptf Broadcom
-vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-sn2700-t0,vms1-1,t0,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests Mellanox SN2700 vms
-vms-s6000-t0,vms2-1,t0,docker-ptf,ptf-unknown,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,Tests Dell S6000 vms
-vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf-unknown,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,Tests Arista A7260 vms
-vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf-unknown,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-s6100-t1,vms4-1,t1-64,docker-ptf,ptf-unknown,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf-unknown,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,Tests Dell S6100 vms
-vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf-unknown,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],Example Multi DUTs testbed
-vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,superman
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+ptf1-m,ptf1,ptf32,docker-ptf,ptf-unknown,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Test ptf Mellanox
+ptf2-b,ptf2,ptf64,docker-ptf,ptf-unknown,10.255.0.189/24,,server_1,,lab-s6100-01,lab,False,Test ptf Broadcom
+vms-sn2700-t1,vms1-1,t1,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-sn2700-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-sn2700-t0,vms1-1,t0,docker-ptf,ptf-unknown,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests Mellanox SN2700 vms
+vms-s6000-t0,vms2-1,t0,docker-ptf,ptf-unknown,10.255.0.179/24,,server_1,VM0100,lab-s6000-01,lab,True,Tests Dell S6000 vms
+vms-a7260-t0,vms3-1,t0-116,docker-ptf,ptf-unknown,10.255.0.180/24,,server_1,VM0100,lab-a7260-01,lab,True,Tests Arista A7260 vms
+vms-s6100-t0,vms4-1,t0-64,docker-ptf,ptf-unknown,10.255.0.181/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-s6100-t1,vms4-1,t1-64,docker-ptf,ptf-unknown,10.255.0.182/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-s6100-t1-lag,vms5-1,t1-64-lag,docker-ptf,ptf-unknown,10.255.0.183/24,,server_1,VM0100,lab-s6100-01,lab,True,Tests Dell S6100 vms
+vms-multi-dut,vms1-duts,ptf64,docker-ptf,ptf-unknown,10.255.0.184/24,,server_1,VM0100,[dut-host1;dut-host2],lab,True,Example Multi DUTs testbed
+vms-example-ixia-1,vms6-1,t0-64,docker-ptf-ixia,example-ixia-ptf-1,10.0.0.30/32,,server_6,VM0600,example-s6100-dut-1,lab,True,superman
diff --git a/ansible/testbed_announce_routes.yml b/ansible/testbed_announce_routes.yml
new file mode 100644
index 00000000000..edc24eb9c7f
--- /dev/null
+++ b/ansible/testbed_announce_routes.yml
@@ -0,0 +1,40 @@
+# This playbook announces IP routes to the PTF docker container
+#
+# The PTF docker container has BGP sessions established with VMs. The VMs can learn IP routes from PTF docker and
+# populate the IP routes to DUT.
+#
+# To announce IP routes to PTF docker container, please use below command:
+# ANSIBLE_SCP_IF_SSH=y ansible-playbook -i veos_vtb testbed_announce_routes.yml --vault-password-file=~/.password -l server_1 -e vm_set_name=first -e ptf_ip=10.255.0.255/23 -e topo=t0
+#
+# Parameters
+# -i veos_vtb - inventory file
+# -l server_1 - this playbook have to be limited to run only on one server
+# -e vm_set_name=first - the name of vm_set
+# -e ptf_ip=10.255.0.255/23 - the ip address and prefix of ptf container mgmt interface
+# -e topo=t0 - the name of the topology
+
+- hosts: servers:&vm_host
+ gather_facts: no
+ pre_tasks:
+
+ - name: Check that variable vm_set_name is defined
+ fail: msg="Define vm_set_name variable with -e vm_set_name=something"
+ when: vm_set_name is not defined
+
+ - name: Check that variable topo is defined
+ fail: msg="Define topo variable with -e topo=something"
+ when: topo is not defined
+
+ - name: Check if it is a known topology
+ fail: msg="Unknown topology {{ topo }}"
+ when: topo not in topologies
+
+ - name: Check that variable ptf_ip is defined
+ fail: msg="Define ptf ip variable with -e ptf_ip=something"
+ when: ptf_ip is not defined
+
+ - name: Load topo variables
+ include_vars: "vars/topo_{{ topo }}.yml"
+
+ tasks:
+ - include_tasks: roles/vm_set/tasks/announce_routes.yml
diff --git a/ansible/testbed_cleanup.yml b/ansible/testbed_cleanup.yml
new file mode 100644
index 00000000000..af79a5f318b
--- /dev/null
+++ b/ansible/testbed_cleanup.yml
@@ -0,0 +1,17 @@
+---
+# This playbook will cleanup a vm_host, including removing all veos, containers and net bridges.
+
+- hosts: servers:&vm_host
+ gather_facts: no
+ tasks:
+ - name: run apt update and upgrade
+ include_tasks: update_reboot.yml
+
+ - name: run apt update and upgrade again
+ include_tasks: update_reboot.yml
+ when: '"0 upgraded" not in apt_update_res.stdout'
+ with_items: '{{ range(0,3)|list }}'
+
+ - name: run cleanup script
+ shell: bash /home/azure/veos-vm/cleanup.sh
+ become: yes
diff --git a/ansible/update_reboot.yml b/ansible/update_reboot.yml
new file mode 100644
index 00000000000..3085cc372ef
--- /dev/null
+++ b/ansible/update_reboot.yml
@@ -0,0 +1,14 @@
+---
+# This playbook will update the apt cache, and do a reboot if the cache was updated.
+
+- name: update apt cache
+ apt: update_cache=yes upgrade=yes
+ environment: "{{ proxy_env | default({}) }}"
+ register: apt_update_res
+ become: true
+
+- name: reboot vm_host
+ reboot: reboot_timeout=600
+ when: '"0 upgraded" not in apt_update_res.stdout'
+ become: true
+
diff --git a/ansible/vars/topo_dualtor-56.yml b/ansible/vars/topo_dualtor-56.yml
index a8cfcb5c0d4..ab474ebb613 100644
--- a/ansible/vars/topo_dualtor-56.yml
+++ b/ansible/vars/topo_dualtor-56.yml
@@ -54,7 +54,7 @@ topology:
- 0.3@3,1.3@3
- 0.5@5,1.5@5
- 0.7@7,1.7@7
- - 0.9@9,1.5@9
+ - 0.9@9,1.9@9
- 0.11@11,1.11@11
- 0.17@17,1.17@17
- 0.19@19,1.19@19
diff --git a/ansible/vars/topo_msft_four_asic_vs.yml b/ansible/vars/topo_msft_four_asic_vs.yml
new file mode 100644
index 00000000000..ee5bc27cb21
--- /dev/null
+++ b/ansible/vars/topo_msft_four_asic_vs.yml
@@ -0,0 +1,197 @@
+ASIC0:
+ topology:
+ NEIGH_ASIC:
+ ASIC2:
+ asic_intfs:
+ - Eth4-ASIC0
+ - Eth5-ASIC0
+ ASIC3:
+ asic_intfs:
+ - Eth6-ASIC0
+ - Eth7-ASIC0
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.0/32
+ - 2603:10e2:400::/128
+ configuration:
+ ASIC2:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.1
+ - 2603:10e2:400:1::2
+ interfaces:
+ Eth0-ASIC2:
+ lacp: 1
+ Eth1-ASIC2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.0/31
+ ipv6: 2603:10e2:400:1::1/126
+ ASIC3:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.3
+ - 2603:10e2:400:1::6
+ interfaces:
+ Eth0-ASIC3:
+ lacp: 2
+ Eth1-ASIC3:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.2/31
+ ipv6: 2603:10e2:400:1::5/126
+ASIC1:
+ topology:
+ NEIGH_ASIC:
+ ASIC2:
+ asic_intfs:
+ - Eth4-ASIC1
+ - Eth5-ASIC1
+ ASIC3:
+ asic_intfs:
+ - Eth6-ASIC1
+ - Eth7-ASIC1
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.1/32
+ - 2603:10e2:400::1/128
+ configuration:
+ ASIC2:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.5
+ - 2603:10e2:400:1::a
+ interfaces:
+ Eth2-ASIC2:
+ lacp: 1
+ Eth3-ASIC2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.4/31
+ ipv6: 2603:10e2:400:1::9/126
+ ASIC3:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.7
+ - 2603:10e2:400:1::e
+ interfaces:
+ Eth2-ASIC3:
+ lacp: 2
+ Eth3-ASIC3:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.6/31
+ ipv6: 2603:10e2:400:1::d/126
+ASIC2:
+ topology:
+ NEIGH_ASIC:
+ ASIC0:
+ asic_intfs:
+ - Eth0-ASIC2
+ - Eth1-ASIC2
+ ASIC1:
+ asic_intfs:
+ - Eth2-ASIC2
+ - Eth3-ASIC2
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: BackEnd
+ Loopback4096:
+ - 8.0.0.2/32
+ - 2603:10e2:400::2/128
+ configuration:
+ ASIC0:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.0
+ - 2603:10e2:400:1::1
+ interfaces:
+ Eth4-ASIC0:
+ lacp: 1
+ Eth5-ASIC0:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.1/31
+ ipv6: 2603:10e2:400:1::2/126
+ ASIC1:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.4
+ - 2603:10e2:400:1::9
+ interfaces:
+ Eth4-ASIC1:
+ lacp: 2
+ Eth5-ASIC1:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.5/31
+ ipv6: 2603:10e2:400:1::a/126
+ASIC3:
+ topology:
+ NEIGH_ASIC:
+ ASIC0:
+ asic_intfs:
+ - Eth0-ASIC3
+ - Eth1-ASIC3
+ ASIC1:
+ asic_intfs:
+ - Eth2-ASIC3
+ - Eth3-ASIC3
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: BackEnd
+ Loopback4096:
+ - 8.0.0.3/32
+ - 2603:10e2:400::3/128
+
+ configuration:
+ ASIC0:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.2
+ - 2603:10e2:400:1::5
+ interfaces:
+ Eth6-ASIC0:
+ lacp: 1
+ Eth7-ASIC0:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.3/31
+ ipv6: 2603:10e2:400:1::6/126
+ ASIC1:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.6
+ - 2603:10e2:400:1::d
+ interfaces:
+ Eth6-ASIC1:
+ lacp: 2
+ Eth7-ASIC1:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.7/31
+ ipv6: 2603:10e2:400:1::e/126
diff --git a/ansible/vars/topo_msft_multi_asic_vs.yml b/ansible/vars/topo_msft_multi_asic_vs.yml
new file mode 100644
index 00000000000..89b074c2084
--- /dev/null
+++ b/ansible/vars/topo_msft_multi_asic_vs.yml
@@ -0,0 +1,659 @@
+ASIC0:
+ topology:
+ NEIGH_ASIC:
+ ASIC4:
+ asic_intfs:
+ - Eth16-ASIC0
+ - Eth17-ASIC0
+ - Eth18-ASIC0
+ - Eth19-ASIC0
+ - Eth20-ASIC0
+ - Eth21-ASIC0
+ - Eth22-ASIC0
+ - Eth23-ASIC0
+ ASIC5:
+ asic_intfs:
+ - Eth24-ASIC0
+ - Eth25-ASIC0
+ - Eth26-ASIC0
+ - Eth27-ASIC0
+ - Eth28-ASIC0
+ - Eth29-ASIC0
+ - Eth30-ASIC0
+ - Eth31-ASIC0
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.0/32
+ - 2603:10e2:400::/128
+ configuration:
+ ASIC4:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.1
+ - 2603:10e2:400:1::2
+ interfaces:
+ Eth0-ASIC4:
+ lacp: 1
+ Eth1-ASIC4:
+ lacp: 1
+ Eth2-ASIC4:
+ lacp: 1
+ Eth3-ASIC4:
+ lacp: 1
+ Eth4-ASIC4:
+ lacp: 1
+ Eth5-ASIC4:
+ lacp: 1
+ Eth6-ASIC4:
+ lacp: 1
+ Eth7-ASIC4:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.0/31
+ ipv6: 2603:10e2:400:1::1/126
+ ASIC5:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.3
+ - 2603:10e2:400:1::6
+ interfaces:
+ Eth0-ASIC5:
+ lacp: 2
+ Eth1-ASIC5:
+ lacp: 2
+ Eth2-ASIC5:
+ lacp: 2
+ Eth3-ASIC5:
+ lacp: 2
+ Eth4-ASIC5:
+ lacp: 2
+ Eth5-ASIC5:
+ lacp: 2
+ Eth6-ASIC5:
+ lacp: 2
+ Eth7-ASIC5:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.2/31
+ ipv6: 2603:10e2:400:1::5/126
+ASIC1:
+ topology:
+ NEIGH_ASIC:
+ ASIC4:
+ asic_intfs:
+ - Eth16-ASIC1
+ - Eth17-ASIC1
+ - Eth18-ASIC1
+ - Eth19-ASIC1
+ - Eth20-ASIC1
+ - Eth21-ASIC1
+ - Eth22-ASIC1
+ - Eth23-ASIC1
+ ASIC5:
+ asic_intfs:
+ - Eth24-ASIC1
+ - Eth25-ASIC1
+ - Eth26-ASIC1
+ - Eth27-ASIC1
+ - Eth28-ASIC1
+ - Eth29-ASIC1
+ - Eth30-ASIC1
+ - Eth31-ASIC1
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.1/32
+ - 2603:10e2:400::1/128
+ configuration:
+ ASIC4:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.5
+ - 2603:10e2:400:1::a
+ interfaces:
+ Eth8-ASIC4:
+ lacp: 1
+ Eth9-ASIC4:
+ lacp: 1
+ Eth10-ASIC4:
+ lacp: 1
+ Eth11-ASIC4:
+ lacp: 1
+ Eth12-ASIC4:
+ lacp: 1
+ Eth13-ASIC4:
+ lacp: 1
+ Eth14-ASIC4:
+ lacp: 1
+ Eth15-ASIC4:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.4/31
+ ipv6: 2603:10e2:400:1::9/126
+ ASIC5:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.7
+ - 2603:10e2:400:1::e
+ interfaces:
+ Eth8-ASIC5:
+ lacp: 2
+ Eth9-ASIC5:
+ lacp: 2
+ Eth10-ASIC5:
+ lacp: 2
+ Eth11-ASIC5:
+ lacp: 2
+ Eth12-ASIC5:
+ lacp: 2
+ Eth13-ASIC5:
+ lacp: 2
+ Eth14-ASIC5:
+ lacp: 2
+ Eth15-ASIC5:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.6/31
+ ipv6: 2603:10e2:400:1::d/126
+ASIC2:
+ topology:
+ NEIGH_ASIC:
+ ASIC4:
+ asic_intfs:
+ - Eth16-ASIC2
+ - Eth17-ASIC2
+ - Eth18-ASIC2
+ - Eth19-ASIC2
+ - Eth20-ASIC2
+ - Eth21-ASIC2
+ - Eth22-ASIC2
+ - Eth23-ASIC2
+ ASIC5:
+ asic_intfs:
+ - Eth24-ASIC2
+ - Eth25-ASIC2
+ - Eth26-ASIC2
+ - Eth27-ASIC2
+ - Eth28-ASIC2
+ - Eth29-ASIC2
+ - Eth30-ASIC2
+ - Eth31-ASIC2
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.2/32
+ - 2603:10e2:400::2/128
+ configuration:
+ ASIC4:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.9
+ - 2603:10e2:400:1::12
+ interfaces:
+ Eth24-ASIC4:
+ lacp: 1
+ Eth25-ASIC4:
+ lacp: 1
+ Eth26-ASIC4:
+ lacp: 1
+ Eth27-ASIC4:
+ lacp: 1
+ Eth28-ASIC4:
+ lacp: 1
+ Eth29-ASIC4:
+ lacp: 1
+ Eth30-ASIC4:
+ lacp: 1
+ Eth31-ASIC4:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.8/31
+ ipv6: 2603:10e2:400:1::11/126
+ ASIC5:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.11
+ - 2603:10e2:400:1::16
+ interfaces:
+ Eth24-ASIC5:
+ lacp: 2
+ Eth25-ASIC5:
+ lacp: 2
+ Eth26-ASIC5:
+ lacp: 2
+ Eth27-ASIC5:
+ lacp: 2
+ Eth28-ASIC5:
+ lacp: 2
+ Eth29-ASIC5:
+ lacp: 2
+ Eth30-ASIC5:
+ lacp: 2
+ Eth31-ASIC5:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.10/31
+ ipv6: 2603:10e2:400:1::15/126
+ASIC3:
+ topology:
+ NEIGH_ASIC:
+ ASIC4:
+ asic_intfs:
+ - Eth16-ASIC3
+ - Eth17-ASIC3
+ - Eth18-ASIC3
+ - Eth19-ASIC3
+ - Eth20-ASIC3
+ - Eth21-ASIC3
+ - Eth22-ASIC3
+ - Eth23-ASIC3
+ ASIC5:
+ asic_intfs:
+ - Eth24-ASIC3
+ - Eth25-ASIC3
+ - Eth26-ASIC3
+ - Eth27-ASIC3
+ - Eth28-ASIC3
+ - Eth29-ASIC3
+ - Eth30-ASIC3
+ - Eth31-ASIC3
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: FrontEnd
+ Loopback4096:
+ - 8.0.0.3/32
+ - 2603:10e2:400::3/128
+ configuration:
+ ASIC4:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.13
+ - 2603:10e2:400:1::1a
+ interfaces:
+ Eth24-ASIC4:
+ lacp: 1
+ Eth25-ASIC4:
+ lacp: 1
+ Eth26-ASIC4:
+ lacp: 1
+ Eth27-ASIC4:
+ lacp: 1
+ Eth28-ASIC4:
+ lacp: 1
+ Eth29-ASIC4:
+ lacp: 1
+ Eth30-ASIC4:
+ lacp: 1
+ Eth31-ASIC4:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.12/31
+ ipv6: 2603:10e2:400:1::19/126
+ ASIC5:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.15
+ - 2603:10e2:400:1::1e
+ interfaces:
+ Eth24-ASIC5:
+ lacp: 2
+ Eth25-ASIC5:
+ lacp: 2
+ Eth26-ASIC5:
+ lacp: 2
+ Eth27-ASIC5:
+ lacp: 2
+ Eth28-ASIC5:
+ lacp: 2
+ Eth29-ASIC5:
+ lacp: 2
+ Eth30-ASIC5:
+ lacp: 2
+ Eth31-ASIC5:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.14/31
+ ipv6: 2603:10e2:400:1::1d/126
+ASIC4:
+ topology:
+ NEIGH_ASIC:
+ ASIC0:
+ asic_intfs:
+ - Eth0-ASIC4
+ - Eth1-ASIC4
+ - Eth2-ASIC4
+ - Eth3-ASIC4
+ - Eth4-ASIC4
+ - Eth5-ASIC4
+ - Eth6-ASIC4
+ - Eth7-ASIC4
+ ASIC1:
+ asic_intfs:
+ - Eth8-ASIC4
+ - Eth9-ASIC4
+ - Eth10-ASIC4
+ - Eth11-ASIC4
+ - Eth12-ASIC4
+ - Eth13-ASIC4
+ - Eth14-ASIC4
+ - Eth15-ASIC4
+ ASIC2:
+ asic_intfs:
+ - Eth16-ASIC4
+ - Eth17-ASIC4
+ - Eth18-ASIC4
+ - Eth19-ASIC4
+ - Eth20-ASIC4
+ - Eth21-ASIC4
+ - Eth22-ASIC4
+ - Eth23-ASIC4
+ ASIC3:
+ asic_intfs:
+ - Eth24-ASIC4
+ - Eth25-ASIC4
+ - Eth26-ASIC4
+ - Eth27-ASIC4
+ - Eth28-ASIC4
+ - Eth29-ASIC4
+ - Eth30-ASIC4
+ - Eth31-ASIC4
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: BackEnd
+ Loopback4096:
+ - 8.0.0.4/32
+ - 2603:10e2:400::4/128
+ configuration:
+ ASIC0:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.0
+ - 2603:10e2:400:1::1
+ interfaces:
+ Eth16-ASIC0:
+ lacp: 1
+ Eth17-ASIC0:
+ lacp: 1
+ Eth18-ASIC0:
+ lacp: 1
+ Eth19-ASIC0:
+ lacp: 1
+ Eth20-ASIC0:
+ lacp: 1
+ Eth21-ASIC0:
+ lacp: 1
+ Eth22-ASIC0:
+ lacp: 1
+ Eth23-ASIC0:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.1/31
+ ipv6: 2603:10e2:400:1::2/126
+ ASIC1:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.4
+ - 2603:10e2:400:1::9
+ interfaces:
+ Eth16-ASIC1:
+ lacp: 2
+ Eth17-ASIC1:
+ lacp: 2
+ Eth18-ASIC1:
+ lacp: 2
+ Eth19-ASIC1:
+ lacp: 2
+ Eth20-ASIC1:
+ lacp: 2
+ Eth21-ASIC1:
+ lacp: 2
+ Eth22-ASIC1:
+ lacp: 2
+ Eth23-ASIC1:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.5/31
+ ipv6: 2603:10e2:400:1::a/126
+ ASIC2:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.8
+ - 2603:10e2:400:1::11
+ interfaces:
+ Eth16-ASIC2:
+ lacp: 3
+ Eth17-ASIC2:
+ lacp: 3
+ Eth18-ASIC2:
+ lacp: 3
+ Eth19-ASIC2:
+ lacp: 3
+ Eth20-ASIC2:
+ lacp: 3
+ Eth21-ASIC2:
+ lacp: 3
+ Eth22-ASIC2:
+ lacp: 3
+ Eth23-ASIC2:
+ lacp: 3
+ Port-Channel3:
+ ipv4: 10.1.0.9/31
+ ipv6: 2603:10e2:400:1::12/126
+ ASIC3:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.12
+ - 2603:10e2:400:1::19
+ interfaces:
+ Eth16-ASIC3:
+ lacp: 4
+ Eth17-ASIC3:
+ lacp: 4
+ Eth18-ASIC3:
+ lacp: 4
+ Eth19-ASIC3:
+ lacp: 4
+ Eth20-ASIC3:
+ lacp: 4
+ Eth21-ASIC3:
+ lacp: 4
+ Eth22-ASIC3:
+ lacp: 4
+ Eth23-ASIC3:
+ lacp: 4
+ Port-Channel4:
+ ipv4: 10.1.0.13/31
+ ipv6: 2603:10e2:400:1::1a/126
+ASIC5:
+ topology:
+ NEIGH_ASIC:
+ ASIC0:
+ asic_intfs:
+ - Eth0-ASIC5
+ - Eth1-ASIC5
+ - Eth2-ASIC5
+ - Eth3-ASIC5
+ - Eth4-ASIC5
+ - Eth5-ASIC5
+ - Eth6-ASIC5
+ - Eth7-ASIC5
+ ASIC1:
+ asic_intfs:
+ - Eth8-ASIC5
+ - Eth9-ASIC5
+ - Eth10-ASIC5
+ - Eth11-ASIC5
+ - Eth12-ASIC5
+ - Eth13-ASIC5
+ - Eth14-ASIC5
+ - Eth15-ASIC5
+ ASIC2:
+ asic_intfs:
+ - Eth16-ASIC5
+ - Eth17-ASIC5
+ - Eth18-ASIC5
+ - Eth19-ASIC5
+ - Eth20-ASIC5
+ - Eth21-ASIC5
+ - Eth22-ASIC5
+ - Eth23-ASIC5
+ ASIC3:
+ asic_intfs:
+ - Eth24-ASIC5
+ - Eth25-ASIC5
+ - Eth26-ASIC5
+ - Eth27-ASIC5
+ - Eth28-ASIC5
+ - Eth29-ASIC5
+ - Eth30-ASIC5
+ - Eth31-ASIC5
+ configuration_properties:
+ common:
+ dut_asn: 65100
+ asic_type: BackEnd
+ Loopback4096:
+ - 8.0.0.5/32
+ - 2603:10e2:400::5/128
+
+ configuration:
+ ASIC0:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.2
+ - 2603:10e2:400:1::5
+ interfaces:
+ Eth24-ASIC0:
+ lacp: 1
+ Eth25-ASIC0:
+ lacp: 1
+ Eth26-ASIC0:
+ lacp: 1
+ Eth27-ASIC0:
+ lacp: 1
+ Eth28-ASIC0:
+ lacp: 1
+ Eth29-ASIC0:
+ lacp: 1
+ Eth30-ASIC0:
+ lacp: 1
+ Eth31-ASIC0:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.1.0.3/31
+ ipv6: 2603:10e2:400:1::6/126
+ ASIC1:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.6
+ - 2603:10e2:400:1::d
+ interfaces:
+ Eth24-ASIC1:
+ lacp: 2
+ Eth25-ASIC1:
+ lacp: 2
+ Eth26-ASIC1:
+ lacp: 2
+ Eth27-ASIC1:
+ lacp: 2
+ Eth28-ASIC1:
+ lacp: 2
+ Eth29-ASIC1:
+ lacp: 2
+ Eth30-ASIC1:
+ lacp: 2
+ Eth31-ASIC1:
+ lacp: 2
+ Port-Channel2:
+ ipv4: 10.1.0.7/31
+ ipv6: 2603:10e2:400:1::e/126
+ ASIC2:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.10
+ - 2603:10e2:400:1::15
+ interfaces:
+ Eth24-ASIC2:
+ lacp: 3
+ Eth25-ASIC2:
+ lacp: 3
+ Eth26-ASIC2:
+ lacp: 3
+ Eth27-ASIC2:
+ lacp: 3
+ Eth28-ASIC2:
+ lacp: 3
+ Eth29-ASIC2:
+ lacp: 3
+ Eth30-ASIC2:
+ lacp: 3
+ Eth31-ASIC2:
+ lacp: 3
+ Port-Channel3:
+ ipv4: 10.1.0.11/31
+ ipv6: 2603:10e2:400:1::16/126
+ ASIC3:
+ bgp:
+ asn: 65100
+ peers:
+ 65100:
+ - 10.1.0.14
+ - 2603:10e2:400:1::1d
+ interfaces:
+ Eth24-ASIC3:
+ lacp: 4
+ Eth25-ASIC3:
+ lacp: 4
+ Eth26-ASIC3:
+ lacp: 4
+ Eth27-ASIC3:
+ lacp: 4
+ Eth28-ASIC3:
+ lacp: 4
+ Eth29-ASIC3:
+ lacp: 4
+ Eth30-ASIC3:
+ lacp: 4
+ Eth31-ASIC3:
+ lacp: 4
+ Port-Channel4:
+ ipv4: 10.1.0.15/31
+ ipv6: 2603:10e2:400:1::1e/126
diff --git a/ansible/vars/topo_t0-56-po2vlan.yml b/ansible/vars/topo_t0-56-po2vlan.yml
new file mode 100644
index 00000000000..87bcd069361
--- /dev/null
+++ b/ansible/vars/topo_t0-56-po2vlan.yml
@@ -0,0 +1,325 @@
+topology:
+ host_interfaces:
+ - 0
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - 6
+ - 7
+ - 8
+ - 9
+ - 10
+ - 11
+ - 16
+ - 17
+ - 18
+ - 19
+ - 20
+ - 21
+ - 22
+ - 23
+ - 24
+ - 25
+ - 26
+ - 27
+ - 28
+ - 29
+ - 30
+ - 31
+ - 32
+ - 33
+ - 34
+ - 35
+ - 36
+ - 37
+ - 38
+ - 39
+ - 44
+ - 45
+ - 46
+ - 47
+ - 48
+ - 49
+ - 50
+ - 51
+ - 52
+ - 53
+ - 54
+ - 55
+ disabled_host_interfaces:
+ - 1
+ - 2
+ - 3
+ - 5
+ - 6
+ - 7
+ - 9
+ - 11
+ - 17
+ - 19
+ - 21
+ - 23
+ - 25
+ - 27
+ - 29
+ - 31
+ - 33
+ - 35
+ - 37
+ - 39
+ - 45
+ - 47
+ - 49
+ - 51
+ - 53
+ - 55
+ VMs:
+ ARISTA01T1:
+ vlans:
+ - 12
+ vm_offset: 0
+ ARISTA02T1:
+ vlans:
+ - 13
+ vm_offset: 1
+ ARISTA03T1:
+ vlans:
+ - 14
+ vm_offset: 2
+ ARISTA04T1:
+ vlans:
+ - 15
+ vm_offset: 3
+ ARISTA05T1:
+ vlans:
+ - 40
+ vm_offset: 4
+ ARISTA06T1:
+ vlans:
+ - 41
+ vm_offset: 5
+ ARISTA07T1:
+ vlans:
+ - 42
+ vm_offset: 6
+ ARISTA08T1:
+ vlans:
+ - 43
+ vm_offset: 7
+ DUT:
+ portchannel_config:
+ PortChannel101:
+ intfs: [0, 4]
+ vlan_configs:
+ default_vlan_config: two_vlan_a
+ two_vlan_a:
+ Vlan101:
+ id: 101
+ intfs: []
+ portchannels: ['PortChannel101']
+ prefix: 192.168.100.1/21
+ prefix_v6: fc02:100::1/64
+ tag: 101
+ Vlan102:
+ id: 102
+ intfs: [8, 10, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 44, 46, 48, 50, 52, 54]
+ portchannels: ['PortChannel101']
+ prefix: 192.168.200.1/21
+ prefix_v6: fc02:200::1/64
+ tag: 102
+
+configuration_properties:
+ common:
+ dut_asn: 65100
+ dut_type: ToRRouter
+ swrole: leaf
+ podset_number: 200
+ tor_number: 16
+ tor_subnet_number: 2
+ max_tor_subnet_number: 16
+ tor_subnet_size: 128
+ spine_asn: 65534
+ leaf_asn_start: 64600
+ tor_asn_start: 65100
+ failure_rate: 0
+ nhipv4: 10.10.246.254
+ nhipv6: FC0A::FF
+
+configuration:
+ ARISTA01T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.56
+ - FC00::71
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.29/32
+ ipv6: 2064:100::1d/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.57/31
+ ipv6: fc00::72/126
+ bp_interface:
+ ipv4: 10.10.246.29/24
+ ipv6: fc0a::3a/64
+
+ ARISTA02T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.58
+ - FC00::75
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.30/32
+ ipv6: 2064:100::1e/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.59/31
+ ipv6: fc00::76/126
+ bp_interface:
+ ipv4: 10.10.246.30/24
+ ipv6: fc0a::3d/64
+
+ ARISTA03T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.60
+ - FC00::79
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.31/32
+ ipv6: 2064:100::1f/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.61/31
+ ipv6: fc00::7a/126
+ bp_interface:
+ ipv4: 10.10.246.31/24
+ ipv6: fc0a::3e/64
+
+ ARISTA04T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.62
+ - FC00::7D
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.32/32
+ ipv6: 2064:100::20/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.63/31
+ ipv6: fc00::7e/126
+ bp_interface:
+ ipv4: 10.10.246.32/24
+ ipv6: fc0a::41/64
+
+ ARISTA05T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.64
+ - FC00::81
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.33/32
+ ipv6: 2064:100::21/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.65/31
+ ipv6: fc00::82/126
+ bp_interface:
+ ipv4: 10.10.246.33/24
+ ipv6: fc0a::3a/64
+
+ ARISTA06T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.66
+ - FC00::85
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.34/32
+ ipv6: 2064:100::22/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.67/31
+ ipv6: fc00::86/126
+ bp_interface:
+ ipv4: 10.10.246.34/24
+ ipv6: fc0a::3d/64
+
+ ARISTA07T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.68
+ - FC00::89
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.35/32
+ ipv6: 2064:100::23/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.69/31
+ ipv6: fc00::8a/126
+ bp_interface:
+ ipv4: 10.10.246.35/24
+ ipv6: fc0a::3e/64
+
+ ARISTA08T1:
+ properties:
+ - common
+ bgp:
+ asn: 64600
+ peers:
+ 65100:
+ - 10.0.0.70
+ - FC00::8D
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.36/32
+ ipv6: 2064:100::24/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.71/31
+ ipv6: fc00::8e/126
+ bp_interface:
+ ipv4: 10.10.246.36/24
+ ipv6: fc0a::41/64
diff --git a/ansible/vars/topo_t0-80.yml b/ansible/vars/topo_t0-80.yml
new file mode 100644
index 00000000000..0340ecfce5d
--- /dev/null
+++ b/ansible/vars/topo_t0-80.yml
@@ -0,0 +1,343 @@
+topology:
+ host_interfaces:
+ - 0
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - 6
+ - 7
+ - 8
+ - 9
+ - 10
+ - 11
+ - 12
+ - 13
+ - 14
+ - 15
+ - 16
+ - 17
+ - 18
+ - 19
+ - 20
+ - 21
+ - 22
+ - 23
+ - 24
+ - 25
+ - 26
+ - 27
+ - 28
+ - 29
+ - 30
+ - 31
+ - 32
+ - 33
+ - 34
+ - 35
+ - 36
+ - 37
+ - 38
+ - 39
+ - 40
+ - 41
+ - 42
+ - 43
+ - 44
+ - 45
+ - 46
+ - 47
+ - 52
+ - 53
+ - 54
+ - 55
+ - 56
+ - 57
+ - 58
+ - 59
+ - 60
+ - 61
+ - 62
+ - 63
+ - 64
+ - 65
+ - 66
+ - 67
+
+ VMs:
+ ARISTA01T1:
+ vlans:
+ - 48
+ - 49
+ vm_offset: 0
+ ARISTA02T1:
+ vlans:
+ - 50
+ - 51
+ vm_offset: 1
+
+ ARISTA03T1:
+ vlans:
+ - 68
+ - 69
+ vm_offset: 2
+ ARISTA04T1:
+ vlans:
+ - 70
+ - 71
+ vm_offset: 3
+ ARISTA05T1:
+ vlans:
+ - 72
+ - 73
+ vm_offset: 4
+ ARISTA06T1:
+ vlans:
+ - 74
+ - 75
+ vm_offset: 5
+ ARISTA07T1:
+ vlans:
+ - 76
+ - 77
+ vm_offset: 6
+ ARISTA08T1:
+ vlans:
+ - 78
+ - 79
+ vm_offset: 7
+
+ DUT:
+ vlan_configs:
+ default_vlan_config: one_vlan_a
+ one_vlan_a:
+ Vlan1000:
+ id: 1000
+ intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67]
+ prefix: 192.168.0.1/21
+ prefix_v6: fc02:1000::1/64
+ tag: 1000
+ two_vlan_a:
+ Vlan100:
+ id: 100
+ intfs: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]
+ prefix: 192.168.100.1/21
+ prefix_v6: fc02:100::1/64
+ tag: 100
+ Vlan200:
+ id: 200
+ intfs: [33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67]
+ prefix: 192.168.200.1/21
+ prefix_v6: fc02:200::1/64
+ tag: 200
+
+configuration_properties:
+ common:
+ dut_asn: 64601
+ dut_type: ToRRouter
+ swrole: leaf
+ podset_number: 200
+ tor_number: 16
+ tor_subnet_number: 2
+ max_tor_subnet_number: 16
+ tor_subnet_size: 128
+ spine_asn: 65534
+ leaf_asn_start: 64802
+ tor_asn_start: 64601
+ failure_rate: 0
+ nhipv4: 10.10.246.254
+ nhipv6: FC0A::FF
+
+configuration:
+ ARISTA01T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.0
+ - FC00::1
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.1/32
+ ipv6: 2064:100::1/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.1/31
+ ipv6: fc00::2/126
+ bp_interface:
+ ipv4: 10.10.246.1/24
+ ipv6: fc0a::1/64
+
+ ARISTA02T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.4
+ - FC00::9
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.2/32
+ ipv6: 2064:100::2/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.5/31
+ ipv6: fc00::a/126
+ bp_interface:
+ ipv4: 10.10.246.2/24
+ ipv6: fc0a::2/64
+
+ ARISTA03T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.8
+ - FC00::11
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.3/32
+ ipv6: 2064:100::3/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.9/31
+ ipv6: fc00::12/126
+ bp_interface:
+ ipv4: 10.10.246.3/24
+ ipv6: fc0a::3/64
+
+ ARISTA04T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.12
+ - FC00::19
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.4/32
+ ipv6: 2064:100::4/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.13/31
+ ipv6: fc00::1a/126
+ bp_interface:
+ ipv4: 10.10.246.4/24
+ ipv6: fc0a::4/64
+
+ ARISTA05T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.16
+ - FC00::21
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.5/32
+ ipv6: 2064:100::5/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.17/31
+ ipv6: fc00::22/126
+ bp_interface:
+ ipv4: 10.10.246.5/24
+ ipv6: fc0a::5/64
+
+ ARISTA06T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.20
+ - FC00::29
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.6/32
+ ipv6: 2064:100::6/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.21/31
+ ipv6: fc00::2a/126
+ bp_interface:
+ ipv4: 10.10.246.6/24
+ ipv6: fc0a::6/64
+
+ ARISTA07T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.24
+ - FC00::31
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.7/32
+ ipv6: 2064:100::7/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.25/31
+ ipv6: fc00::32/126
+ bp_interface:
+ ipv4: 10.10.246.7/24
+ ipv6: fc0a::7/64
+
+ ARISTA08T1:
+ properties:
+ - common
+ bgp:
+ asn: 64802
+ peers:
+ 64601:
+ - 10.0.0.28
+ - FC00::39
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.8/32
+ ipv6: 2064:100::8/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.29/31
+ ipv6: fc00::3a/126
+ bp_interface:
+ ipv4: 10.10.246.8/24
+ ipv6: fc0a::8/64
diff --git a/ansible/vars/topo_t1-8-lag.yml b/ansible/vars/topo_t1-8-lag.yml
new file mode 100644
index 00000000000..6a55967cf7c
--- /dev/null
+++ b/ansible/vars/topo_t1-8-lag.yml
@@ -0,0 +1,196 @@
+topology:
+ VMs:
+ ARISTA01T2:
+ vlans:
+ - 0
+ - 1
+ vm_offset: 0
+ ARISTA03T2:
+ vlans:
+ - 2
+ - 3
+ vm_offset: 1
+ ARISTA01T0:
+ vlans:
+ - 4
+ vm_offset: 2
+ ARISTA02T0:
+ vlans:
+ - 5
+ vm_offset: 3
+ ARISTA03T0:
+ vlans:
+ - 6
+ vm_offset: 4
+ ARISTA04T0:
+ vlans:
+ - 7
+ vm_offset: 5
+
+configuration_properties:
+ common:
+ dut_asn: 65100
+ dut_type: LeafRouter
+ nhipv4: 10.10.246.254
+ nhipv6: FC0A::FF
+ podset_number: 200
+ tor_number: 16
+ tor_subnet_number: 2
+ max_tor_subnet_number: 16
+ tor_subnet_size: 128
+ spine:
+ swrole: spine
+ tor:
+ swrole: tor
+
+configuration:
+ ARISTA01T2:
+ properties:
+ - common
+ - spine
+ bgp:
+ asn: 65200
+ peers:
+ 65100:
+ - 10.0.0.0
+ - FC00::1
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.1/32
+ ipv6: 2064:100::1/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.1/31
+ ipv6: fc00::2/126
+ bp_interface:
+ ipv4: 10.10.246.1/24
+ ipv6: fc0a::2/64
+
+ ARISTA03T2:
+ properties:
+ - common
+ - spine
+ bgp:
+ asn: 65200
+ peers:
+ 65100:
+ - 10.0.0.4
+ - FC00::9
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.3/32
+ ipv6: 2064:100::3/128
+ Ethernet1:
+ lacp: 1
+ Ethernet2:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.5/31
+ ipv6: fc00::a/126
+ bp_interface:
+ ipv4: 10.10.246.3/24
+ ipv6: fc0a::6/64
+
+ ARISTA01T0:
+ properties:
+ - common
+ - tor
+ tornum: 1
+ bgp:
+ asn: 64001
+ peers:
+ 65100:
+ - 10.0.0.32
+ - FC00::41
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.17/32
+ ipv6: 2064:100::11/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.33/31
+ ipv6: fc00::42/126
+ bp_interface:
+ ipv4: 10.10.246.17/24
+ ipv6: fc0a::22/64
+
+ ARISTA02T0:
+ properties:
+ - common
+ - tor
+ tornum: 2
+ bgp:
+ asn: 64002
+ peers:
+ 65100:
+ - 10.0.0.34
+ - FC00::45
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.18/32
+ ipv6: 2064:100::12/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.35/31
+ ipv6: fc00::46/126
+ bp_interface:
+ ipv4: 10.10.246.18/24
+ ipv6: fc0a::25/64
+
+ ARISTA03T0:
+ properties:
+ - common
+ - tor
+ tornum: 3
+ bgp:
+ asn: 64003
+ peers:
+ 65100:
+ - 10.0.0.36
+ - FC00::49
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.19/32
+ ipv6: 2064:100::13/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.37/31
+ ipv6: fc00::4a/126
+ bp_interface:
+ ipv4: 10.10.246.19/24
+ ipv6: fc0a::26/64
+ vips:
+ ipv4:
+ prefixes:
+ - 200.0.1.0/26
+ asn: 64700
+
+ ARISTA04T0:
+ properties:
+ - common
+ - tor
+ tornum: 4
+ bgp:
+ asn: 64004
+ peers:
+ 65100:
+ - 10.0.0.38
+ - FC00::4D
+ interfaces:
+ Loopback0:
+ ipv4: 100.1.0.20/32
+ ipv6: 2064:100::14/128
+ Ethernet1:
+ lacp: 1
+ Port-Channel1:
+ ipv4: 10.0.0.39/31
+ ipv6: fc00::4e/126
+ bp_interface:
+ ipv4: 10.10.246.20/24
+ ipv6: fc0a::29/64
diff --git a/ansible/veos b/ansible/veos
index a111ce44e6b..736af4baf5c 100644
--- a/ansible/veos
+++ b/ansible/veos
@@ -23,6 +23,7 @@ all:
- ptf64
- t0-64
- t0-64-32
+ - t0-80
- t0-116
- dualtor
- dualtor-56
diff --git a/ansible/veos_vtb b/ansible/veos_vtb
index 4ab6b40ffce..5be43af99f7 100644
--- a/ansible/veos_vtb
+++ b/ansible/veos_vtb
@@ -12,6 +12,7 @@ all:
- t1
- t1-lag
- t1-64-lag
+ - t1-8-lag
- t1-64-lag-clet
- t0
- t0-16
@@ -35,6 +36,7 @@ all:
vlab-05:
vlab-06:
vlab-07:
+ vlab-08:
ptf:
hosts:
ptf-01:
@@ -91,6 +93,8 @@ all:
type: kvm
hwsku: Force10-S6000
serial_port: 9002
+ ansible_password: password
+ ansible_user: admin
vlab-05:
ansible_host: 10.250.0.110
ansible_hostv6: fec0::ffff:afa:a
@@ -116,6 +120,15 @@ all:
num_asics: 6
ansible_password: password
ansible_user: admin
+ vlab-08:
+ ansible_host: 10.250.0.112
+ ansible_hostv6: fec0::ffff:afa:c
+ type: kvm
+ hwsku: msft_four_asic_vs
+ serial_port: 9005
+ num_asics: 4
+ ansible_password: password
+ ansible_user: admin
vlab-simx-01:
ansible_host: 10.250.0.103
ansible_hostv6: fec0::ffff:afa:3
diff --git a/ansible/vtestbed.csv b/ansible/vtestbed.csv
index 93a9f97c9b8..2f0a300f176 100644
--- a/ansible/vtestbed.csv
+++ b/ansible/vtestbed.csv
@@ -1,7 +1,8 @@
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-01],Tests virtual switch vm
-vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-02],Tests virtual switch vm
-vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,fec0::ffff:afa:6/64,server_1,VM0104,[vlab-03],Tests virtual switch vm
-vms-kvm-t0-2,vms6-3,t0,docker-ptf,ptf-03,10.250.0.108/24,fec0::ffff:afa:8/64,server_1,VM0104,[vlab-04],Tests virtual switch vm
-vms-kvm-dual-t0,vms6-4,dualtor,docker-ptf,ptf-04,10.250.0.109/24,fec0::ffff:afa:9/64,server_1,VM0108,[vlab-05;vlab-06],Dual-TOR testbed
-vms-kvm-multi-asic-t1-lag,vms6-4,t1-64-lag,docker-ptf,ptf-05,10.250.0.110/24,fec0::ffff:afa:a/64,server_1,VM0104,[vlab-07],Tests multi-asic virtual switch vm
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+vms-kvm-t0,vms6-1,t0,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-01],veos_vtb,False,Tests virtual switch vm
+vms-kvm-t0-64,vms6-1,t0-64,docker-ptf,ptf-01,10.250.0.102/24,fec0::ffff:afa:2/64,server_1,VM0100,[vlab-02],veos_vtb,False,Tests virtual switch vm
+vms-kvm-t1-lag,vms6-2,t1-lag,docker-ptf,ptf-02,10.250.0.106/24,fec0::ffff:afa:6/64,server_1,VM0104,[vlab-03],veos_vtb,False,Tests virtual switch vm
+vms-kvm-t0-2,vms6-3,t0,docker-ptf,ptf-03,10.250.0.108/24,fec0::ffff:afa:8/64,server_1,VM0104,[vlab-04],veos_vtb,False,Tests virtual switch vm
+vms-kvm-dual-t0,vms6-4,dualtor,docker-ptf,ptf-04,10.250.0.109/24,fec0::ffff:afa:9/64,server_1,VM0108,[vlab-05;vlab-06],veos_vtb,False,Dual-TOR testbed
+vms-kvm-multi-asic-t1-lag,vms6-4,t1-64-lag,docker-ptf,ptf-05,10.250.0.110/24,fec0::ffff:afa:a/64,server_1,VM0104,[vlab-07],veos_vtb,False,Tests multi-asic virtual switch vm
+vms-kvm-four-asic-t1-lag,vms6-4,t1-8-lag,docker-ptf,ptf-05,10.250.0.110/24,fec0::ffff:afa:a/64,server_1,VM0104,[vlab-08],veos_vtb,False,Tests multi-asic virtual switch vm
diff --git a/docs/testbed/README.new.testbed.Configuration.md b/docs/testbed/README.new.testbed.Configuration.md
index eb6b0571f91..49e7c0681ae 100644
--- a/docs/testbed/README.new.testbed.Configuration.md
+++ b/docs/testbed/README.new.testbed.Configuration.md
@@ -49,12 +49,12 @@ The devices section is a dictionary that contains all devices and hosts. This se
For each device that you add, add the following:
-| Hostname | ansible_host | ansible_ssh_user | ansible_ssh_pass | HwSKU | device_type |
-| ------ | ------ | ------ | ------ | ------ | ------ |
-| str-msn2700-01 | [IP Address] | [username] | [password] | DevSonic | DevSonic |
-| str-7260-10 | [IP Address] | [username] | [password] |Arista-7260QX-64 | FanoutRoot |
-| str-7260-10 | [IP Address] | [username] | [password] |Arista-7260QX-64 | FanoutLeaf |
-| str-acs-serv-01 | [IP Address] | [username] | [password] | TestServ | Server |
+| Hostname | ansible_host | ansible_ssh_user | ansible_ssh_pass | HwSKU | device_type |
+| --------------- | ------------ | ---------------- | ---------------- | ---------------- | ----------- |
+| str-msn2700-01 | [IP Address] | [username] | [password] | DevSonic | DevSonic |
+| str-7260-10 | [IP Address] | [username] | [password] | Arista-7260QX-64 | FanoutRoot |
+| str-7260-10 | [IP Address] | [username] | [password] | Arista-7260QX-64 | FanoutLeaf |
+| str-acs-serv-01 | [IP Address] | [username] | [password] | TestServ | Server |
- hostname - names the devices you will use
- ansible_host - this is the managementIP where you can connect to to the device
@@ -111,10 +111,10 @@ Define:
This is where the topology configuration file for the testbed will collect information from when running TestbedProcessing.py.
-| #conf-name | group-name | topo | ptf_image_name | ptf_ip | server | vm_base | dut | comment |
-| ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ | ------ |
-| [ptf32 conf-name] | [ptf32 group-name] | [ptf32] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [comment] |
-| [t0 conf-name] | [t0 group-name] | [t0] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [comment] |
+| #conf-name | group-name | topo | ptf_image_name | ptf_ip | server | vm_base | dut | inv_name | auto_recover | comment |
+| ----------------- | ------------------ | ------- | -------------- | ------------ | -------------- | --------- | ----- | ---------- | -------------- | --------- |
+| [ptf32 conf-name] | [ptf32 group-name] | [ptf32] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [inv_name] | [auto_recover] | [comment] |
+| [t0 conf-name] | [t0 group-name] | [t0] | [docker-ptf] | [ip address] | [server group] | [vm_base] | [dut] | [inv_name] | [auto_recover] | [comment] |
For each topology you use in your testbed environment, define the following:
@@ -129,6 +129,8 @@ For each topology you use in your testbed environment, define the following:
- server - server where the testbed resides. Choose a veos_group to use that contains both the lab server and virtual machines
- vm_base - enter in the lowest ID value for the VMs you will be using to run the test cases. The lowest VM ID value can be found under the veos section of the testbed configuration file. IF empty, no VMs are used
- dut - enter in the target DUT that is used in the testbed environment
+- inv_name - inventory file name that contains the definition of the target DUTs
+- auto_recover - (`yes`|`True`|`true`) to recover this testbed when running the server recovery script, (`no`|`False`|`false`) otherwise
- comment - make a little note here
- ansible
- ansible_host - IP address with port number
diff --git a/docs/testbed/README.testbed.Cli.md b/docs/testbed/README.testbed.Cli.md
index 22d65128c80..e9d61dd27e7 100644
--- a/docs/testbed/README.testbed.Cli.md
+++ b/docs/testbed/README.testbed.Cli.md
@@ -12,9 +12,9 @@
## Add/Remove topo
```
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-vms1-1-t1,vms1-1,t1,docker-ptf,ptf-1,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,t1 tests
-vms1-1-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-2,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,t1-lag tests
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+vms1-1-t1,vms1-1,t1,docker-ptf,ptf-1,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,lab,True,t1 tests
+vms1-1-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-2,10.0.10.5/23,,server_1,VM0100,str-msn2700-11,lab,False,t1-lag tests
```
Goal is to use one VM with different topologies
diff --git a/docs/testbed/README.testbed.Config.md b/docs/testbed/README.testbed.Config.md
index 19d32950464..316c470f851 100644
--- a/docs/testbed/README.testbed.Config.md
+++ b/docs/testbed/README.testbed.Config.md
@@ -25,10 +25,10 @@
### ```testbed.csv``` format
```
-# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,comment
-ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,Tests ptf
-vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
-vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,Tests vms
+# conf-name,group-name,topo,ptf_image_name,ptf,ptf_ip,ptf_ipv6,server,vm_base,dut,inv_name,auto_recover,comment
+ptf1-m,ptf1,ptf32,docker-ptf,ptf-1,10.255.0.188/24,,server_1,,str-msn2700-01,lab,False,Tests ptf
+vms-t1,vms1-1,t1,docker-ptf,ptf-2,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
+vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-msn2700-01,lab,True,Tests vms
```
@@ -40,6 +40,8 @@ vms-t1-lag,vms1-1,t1-lag,docker-ptf,ptf-3,10.255.0.178/24,,server_1,VM0100,str-m
- server – server where the testbed resides
- vm_base – first VM for the testbed. If empty, no VMs are used
- dut – target dut name
+- inv_name - inventory file name that contains the definition of the target DUTs
+- auto_recover - (`yes`|`True`|`true`) to recover this testbed when running the server recovery script, (`no`|`False`|`false`) otherwise
- comment – any text here
### ```testbed.csv``` consistency rules
diff --git a/docs/testbed/README.testbed.Topology.md b/docs/testbed/README.testbed.Topology.md
index 9cd97327f35..27097be4519 100644
--- a/docs/testbed/README.testbed.Topology.md
+++ b/docs/testbed/README.testbed.Topology.md
@@ -70,4 +70,11 @@
- 4 DUT ports are connected to VMs
- PTF container has 4 injected ports and 28 directly connected ports
+### t0-80
+
+
+
+ - Requires 8 VMs
+ - 16 DUT ports are connected to VMs
+ - PTF container has 16 injected ports and 64 directly connected ports
diff --git a/docs/testbed/README.testbed.VsSetup.md b/docs/testbed/README.testbed.VsSetup.md
index fbd3eec94bc..5fc26f8295b 100644
--- a/docs/testbed/README.testbed.VsSetup.md
+++ b/docs/testbed/README.testbed.VsSetup.md
@@ -19,8 +19,8 @@ sudo ./setup-management-network.sh
3. [Install Docker CE](https://docs.docker.com/install/linux/docker-ce/ubuntu/). Be sure to follow the [post-install instructions](https://docs.docker.com/install/linux/linux-postinstall/) so that you don't need sudo privileges to run docker commands.
-## Download an EOS VM image
-We currently use EOS-based VMs to simulate neighboring devices in the virtual testbed, much like we do for physical testbeds. To do so, we need to download the image to our testbed host.
+## Download a VM image
+We currently support EOS-based or SONiC VMs to simulate neighboring devices in the virtual testbed, much like we do for physical testbeds. To do so, we need to download the image to our testbed host.
### Option 1: vEOS (KVM-based) image
1. Download the [vEOS image from Arista](https://www.arista.com/en/support/software-download)
@@ -47,6 +47,9 @@ ceosimage 4.23.2F d53c28e38448
If you want to skip downloading the image when the cEOS image is not imported locally, set `skip_ceos_image_downloading` to `true` in `sonic-mgmt/ansible/group_vars/all/ceos.yml`. Then, when the cEOS image is not locally available, the scripts will not try to download it and will fail with an error message. Please use option 1 to download and import the cEOS image manually.
+#### Option 3: Use SONiC image as neighboring devices
+You need to prepare a working SONiC image `sonic-vs.img` in `~/veos-vm/images/`. We don't currently support downloading a pre-built SONiC image for this purpose, but for testing you can follow the section [Download the sonic-vs image](#download-the-sonic-vs-image) to download an available image and put it into the directory `~/veos-vm/images`
+
## Download the sonic-vs image
To run the tests with a virtual SONiC device, we need a virtual SONiC image. The simplest way to do so is to download a public build from Jenkins.
@@ -127,7 +130,7 @@ foo ALL=(ALL) NOPASSWD:ALL
5. Verify that you can use `sudo` without a password prompt inside the host (e.g. `sudo bash`).
-## Setup Arista VMs on the server
+## Setup VMs on the server
**(Skip this step if you are using cEOS - the containers will be automatically setup in a later step.)**
Now we need to spin up some VMs on the host to act as neighboring devices to our virtual SONiC switch.
@@ -136,11 +139,13 @@ Now we need to spin up some VMs on the host to act as neighboring devices to our
```
$ ./testbed-cli.sh -m veos_vtb -n 4 start-vms server_1 password.txt
```
+If you use a SONiC image for the VMs, you need to add the extra parameter `-k sonic` so that the command becomes `./testbed-cli.sh -m veos_vtb -n 4 -k sonic start-vms server_1 password.txt`. Likewise, if you want to stop the VMs, you also need to append this parameter to the original command.
- **Reminder:** By default, this shell script requires a password file. If you are not using Ansible Vault, just create a file with a dummy password and pass the filename to the command line.
-2. Check that all VMs are up and running. **Note:** The passwd is `123456`.
+2. Check that all VMs are up and running.
+For the EOS-based VMs **Note:** The passwd is `123456`.
```
$ ansible -m ping -i veos_vtb server_1 -u root -k
VM0102 | SUCCESS => {
@@ -164,7 +169,30 @@ VM0100 | SUCCESS => {
"ping": "pong"
}
```
-
+For the SONiC VMs **Note:** The passwd is `password`.
+```
+$ ansible -m ping -i veos_vtb server_1 -u admin -k
+VM0102 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+}
+VM0101 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+}
+STR-ACS-VSERV-01 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+}
+VM0103 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+}
+VM0100 | SUCCESS => {
+ "changed": false,
+ "ping": "pong"
+}
+```
## Deploy T0 topology
Now we're finally ready to deploy the topology for our testbed! Run the following command, depending on what type of EOS image you are using for your setup:
diff --git a/docs/testbed/img/testbed-t0-80.png b/docs/testbed/img/testbed-t0-80.png
new file mode 100644
index 00000000000..54606651467
Binary files /dev/null and b/docs/testbed/img/testbed-t0-80.png differ
diff --git a/docs/testbed/img/testbed-t2.png b/docs/testbed/img/testbed-t2.png
index b12412dda74..ad5e594de1b 100644
Binary files a/docs/testbed/img/testbed-t2.png and b/docs/testbed/img/testbed-t2.png differ
diff --git a/docs/testplan/BGP Convergence Testplan for single DUT.md b/docs/testplan/BGP Convergence Testplan for single DUT.md
new file mode 100644
index 00000000000..cc654192573
--- /dev/null
+++ b/docs/testplan/BGP Convergence Testplan for single DUT.md
@@ -0,0 +1,124 @@
+# BGP convergence test plan for benchmark performance
+
+- [BGP convergence test plan for benchmark performance](#bgp-convergence-test-plan-for-benchmark-performance)
+ - [Overview](#Overview)
+ - [Scope](#Scope)
+ - [Testbed](#Keysight-Testbed)
+ - [Topology](#Topology)
+ - [SONiC switch as ToR](#SONiC-switch-as-ToR)
+ - [SONiC switch as Leaf](#SONiC-switch-as-Leaf)
+ - [Setup configuration](#Setup-configuration)
+ - [Test methodology](#Test-methodology)
+ - [Test cases](#Test-cases)
+ - [Test case # 1 – Convergence performance when remote link fails (route withdraw)](#test-case--1--convergence-performance-when-remote-link-fails-route-withdraw)
+ - [Test objective](#Test-objective)
+ - [Test steps](#Test-steps)
+ - [Test results](#Test-results)
+ - [Test case # 2 – RIB-IN Convergence](#Test-case--2--RIB-IN-Convergence)
+ - [Test objective](#Test-objective-1)
+ - [Test steps](#Test-steps-1)
+ - [Test results](#Test-results-1)
+ - [Call for action](#Call-for-action)
+
+## Overview
+The purpose of these tests is to test the overall convergence of a data center network by simulating multiple network devices such as ToR/Leafs and using SONiC switch DUT as one of the ToR/Leaf, closely resembling production environment.
+
+### Scope
+These tests are targeted at a fully functioning SONiC system. The purpose of these tests is to measure convergence when some unexpected failures such as remote link failure, local link failure, node failure or link faults etc occur and some expected failures such as maintenance or upgrade of devices occur in the SONiC system.
+
+### Keysight Testbed
+The tests will run on following testbeds:
+* t0
+
+
+
+## Topology
+### SONiC switch as ToR
+
+
+
+### SONiC switch as Leaf
+
+
+
+## Setup configuration
+IPv4 EBGP neighborship will be configured between the SONiC DUT and directly connected test ports. Test ports in turn will simulate the ToRs and Leafs by advertising IPv4/IPv6, dual-stack routes.
+
+## Test Methodology
+Following test methodologies will be used for measuring convergence.
+* Traffic generator will be used to configure ebgp peering between chassis ports and SONiC DUT by advertising IPv4/IPv6, dual-stack routes.
+* Receiving ports will be advertising the same VIP(virtual IP) addresses.
+* Data traffic will be sent from server to these VIP addresses.
+* Depending on the test case, the faults will be generated. Local link failures can be simulated on the port by "simulating link down" event.
+* Remote link failures can be simulated by withdrawing the routes.
+* Control to data plane convergence will be measured by noting down the precise time of the control plane event and the data plane event. Convergence will be measured by taking the difference between control and data plane events. Traffic generator will create those events and provide us with the control to data plane convergence value under statistics.
+* RIB-IN Convergence is the time it takes to install the routes in its RIB and then in its FIB to forward the traffic without any loss. In order to measure RIB-IN convergence, initially IPv4/IPv6 routes will not be advertised. Once traffic is sent, IPv4/IPv6 routes will be advertised and the timestamp will be noted. Once the traffic received rate goes above the configured threshold value, it will note down the data plane above threshold timestamp. The difference between these two event timestamps will provide us with the RIB-IN convergence value.
+* Route capacity can be measured by advertising routes in a linear search fashion. By doing this we can figure out the maximum routes a switch can learn and install in its RIB and then in its FIB to forward traffic without any loss.
+
+## Test cases
+### Test case # 1 – Convergence performance when remote link fails (route withdraw)
+#### Test objective
+Measure the convergence time when remote link failure event happens with in the network.
+
+
+
+
+
+
+
+#### Test steps
+* Configure IPv4 EBGP sessions between Keysight ports and the SONiC switch.
+* Advertise IPv4 routes along with AS number via configured IPv4 BGP sessions.
+* Configure and advertise same IPv4 routes from both the test ports.
+* Configure another IPv4 session to send the traffic. This is the server port from which traffic will be sent to the VIP addresses.
+* Start all protocols and verify that IPv4 BGP neighborship is established.
+* Create a data traffic between the server port and receiver ports where the same VIP addresses are configured and enable tracking by "Destination Endpoint" and by "Destination session description".
+* Set the desired threshold value for receiving traffic. By default we will be set to 90% of expected receiving rate.
+* Apply and start the data traffic.
+* Verify that traffic is equally distributed between the receiving ports without any loss.
+* Simulate remote link failure by withdrawing the routes from one receiving port.
+* Verify that the traffic is re-balanced and use the other available path to route the traffic.
+* Drill down by "Destination Endpoint" under traffic statistics to get the control plane to data plane convergence value.
+* In general the convergence value will fall in certain range. In order to achieve proper results, run the test multiple times and average out the test results.
+* Set it back to default configuration.
+#### Test results
+
+
+For above test case, below are the test results when multiple remote link fails.
+
+
+
+
+
+### Test case # 2 – RIB-IN Convergence
+#### Test objective
+Measure the convergence time to install the routes in its RIB and then in its FIB to forward the packets after the routes are advertised.
+
+
+
+
+
+
+#### Test steps
+* Configure IPv4 EBGP sessions between Keysight ports and the SONiC switch.
+* Configure IPv4 routes via configured IPv4 BGP sessions. Initially disable the routes so that they don't get advertised after starting the protocols.
+* Configure the same IPv4 routes from both the test receiving ports.
+* Configure another IPv4 session to send the traffic. This is the server port from which traffic will be sent to the VIP addresses.
+* Start all protocols and verify that IPv4 BGP neighborship is established.
+* Create a data traffic between the server port and receiver ports where the same VIP addresses are configured and enable tracking by "Destination Endpoint" and by "Destination session description".
+* Set the desired threshold value for receiving traffic. By default we will be set to 90% of expected receiving rate.
+* Apply and start the data traffic.
+* Verify that no traffic is being forwarded.
+* Enable/advertise the routes which are already configured.
+* Control plane event timestamp will be noted down and once the receiving traffic rate goes above the configured threshold value, it will note down the data plane threshold timestamp.
+* The difference between these two event timestamp will provide us with the RIB-IN convergence time.
+* In general the convergence value will fall in certain range. In order to achieve proper results, run the test multiple times and average out the test results.
+* Set it back to default configuration.
+#### Test results
+
+
+In order to measure RIB-IN capacity of the switch, we can follow the same test methodology as RIB-IN convergence test. Below are the results for RIB-IN capacity test.
+
+
+### Call for action
+* Solicit experience in multi-DUT system test scenarios.
diff --git a/docs/testplan/BGP-Conformance-IxANVL.md b/docs/testplan/BGP-Conformance-IxANVL.md
new file mode 100644
index 00000000000..ebedf9cc4a2
--- /dev/null
+++ b/docs/testplan/BGP-Conformance-IxANVL.md
@@ -0,0 +1,62 @@
+# BGP conformance using IxANVL
+
+- [BGP conformance using IxANVL](#bgp-conformance-using-ixanvl)
+ - [Purpose](#purpose)
+ - [Scope](#scope)
+ - [IxANVL Testbed](#ixanvl-testbed)
+ - [Topology](#topology)
+ - [Setup configuration](#setup-configuration)
+ - [Test Methodology](#test-methodology)
+ - [Test cases](#test-cases)
+ - [Test results](#test-results)
+ - [Call for actions](#call-for-actions)
+
+## Purpose
+SONiC today uses the FRRouting(FRR) protocol suite for implementing BGP. However in the final deployment network operators may have reasons to customize BGP implementation. Each of these customizations done to BGP needs to be validated to make sure it is not breaking the BGP conformance. This is mandatory to keep interoperability working between different implementations of BGP in a heterogeneous environment which is very common in different tiers of a data center. Network operators have challenges to make sure protocol conformance is maintained at each stage.
+
+A comprehensive way of validating protocol conformance is IxANVL. With IxANVL (Automated Network Validation Library), one can quickly access a vast array of protocol libraries and utilities to validate protocol compliance and interoperability. FRR publishes IxANVL result for each release at https://www.frrouting.org/. IxANVL enables conformance comparisons of customized FRR forks mentioned above to the upstream FRR and other routing implementations.
+
+## Scope
+In this test plan we will integrate IxANVL into the sonic-mgmt testbed server. This will include:
+
+* Deployment of IxANVL docker using Ansible scripts
+* SONiC DUT automation to run the IxANVL tests
+
+An option for using a Keysight chassis connected to a physical DUT will also be included for ease of use.
+To run these tests users will need to procure IxANVL license.
+
+## IxANVL Testbed
+As discussed above the test bed will be hosted in the testbed server.
+Follow the instruction to setup a virtual test bed from this document:
+https://github.com/Azure/sonic-mgmt/blob/master/docs/testbed/README.testbed.VsSetup.md
+
+Only sonic-mgmt and sonic-vs steps are required.
+For IxANVL download, docker image from Ixia website (Link TBD)
+IxANVL test bed deployment (PR TBD)
+
+## Topology
+
+
+## Setup configuration
+1. Testbed Configuration script brings up the IxANVL docker and the SONiC VS DUT
+2. Testbed Configuration script also brings up the connections between IxANVL docker and the SONiC VS DUT for test channel
+
+## Test Methodology
+Here are the high-level steps:
+1. Pytest script pushes initial configuration in DUT using fixtures (duthost)
+2. Pytest script kicks off IxANVL execution
+3. IxANVL starts running test cases and does runtime DUT configurations using vtysh
+4. Once the execution completes, pytest script fetches the run results
+
+
+
+### Test cases
+View IxANVL datasheet for details (BGP4 Core)
+
+https://www.keysight.com/us/en/assets/3120-1119/data-sheets/IxANVL-Automated-Network-Validation-Library.pdf
+### Test results
+Number of tests run: 143
+Number of tests passed: 107
+Number of tests failed: 36
+## Call for actions
+What other protocols are candidates for protocol conformance tests?
diff --git a/docs/testplan/Distributed-VoQ-Arch-test-plan.md b/docs/testplan/Distributed-VoQ-Arch-test-plan.md
new file mode 100644
index 00000000000..a9480b0edba
--- /dev/null
+++ b/docs/testplan/Distributed-VoQ-Arch-test-plan.md
@@ -0,0 +1,800 @@
+# **Distributed VoQ Architecture Test Plan**
+
+ - [Introduction](#intro)
+ - [References](#reference)
+ - [Debuggability](#debug)
+ - [Test Setup](#test-setup)
+ - [Test Cases](#test-cases)
+ - [System Initialization](#sys_init)
+ - [Neighbors](#arp)
+ - [Router Interfaces](#ri)
+ - [Host IP Connectivity](#ipfwd)
+ - [Inband VLAN](#inbandvlan)
+
+
+# Introduction
+
+This is the test plan for SONIC Distributed VOQ support, as described in the [Distributed VOQ HLD](https://github.com/Azure/SONiC/blob/master/doc/voq/voq_hld.md).
+
+The associated PRs covered in this test plan are:
+
+1. [Distributed VOQ PR 380](https://github.com/Azure/sonic-swss-common/pull/380)
+2. [Distributed VOQ PR 657](https://github.com/Azure/sonic-sairedis/pull/657)
+3. [Distributed VOQ PR 1431](https://github.com/Azure/sonic-swss/pull/1431)
+
+Redis CLI commands will be used for some validation until SONIC CLI commands are available for system port information.
+
+## Scope
+
+The functionality covered in this test plan is:
+* system ports,
+* router interfaces, when configured on multiple cards, and
+* neighbors, when learned on local and remote ports.
+
+Other HLDs in the [Chassis Subgroup feature list](https://github.com/Azure/SONiC/wiki/SONiC-Chassis-Subgroup) will be covered in other test plans.
+
+## Debuggability
+The following are useful commands for validating the testcases that follow.
+
+1. Keys from redis in container when no redis-dump exists:
+
+`docker exec database1 redis-cli -h -n 6 KEYS "*"`
+
+2. Values from redis in container when no redis-dump exists:
+
+`docker exec database1 redis-cli -h -n 6 hgetall "SYSTEM_NEIGH_TABLE|Inband4|3.3.3.5"`
+
+3. Chassis App Database on Supervisor card:
+
+`redis-dump -h -p 6380 -d 12 -y -k "*SYSTEM_INT*"`
+
+
+# Test Setup
+
+These test cases will be run in the proposed [T2 topology](https://github.com/Azure/sonic-mgmt/pull/2638/). It is assumed that such a configuration is deployed on the chassis.
+
+# Test Cases
+
+## System Initialization
+
+#### Test Case 1. System Bringup
+
+##### Test Objective
+Verify VoQ system initializes correctly on startup.
+
+##### Test Steps
+* Configure a VoQ system with valid configuration files and verify the system comes up.
+* Verify supervisor card is up, and all required containers and processes are running.
+* Verify redis on supervisor is running and Chassis AppDB is reachable.
+* Verify line cards are up and reachable from supervisor.
+
+#### Test Case 2. Switch Creation
+##### Test Objective
+Verify ASIC Switch object is correct on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB gets switch object created on all asics and linecards (redis-dump -h -d 1 on each linecard)
+* Verify switch ID, cores, port list in ASIC DB have the same values as the config_db.json file.
+* Verify switch type is voq.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": {
+ "expireat": 1550863898.649604,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "NULL": "NULL",
+ "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_SEED": "0",
+ "SAI_SWITCH_ATTR_FDB_AGING_TIME": "600",
+ "SAI_SWITCH_ATTR_FDB_EVENT_NOTIFY": "0x55df0bc54540",
+ "SAI_SWITCH_ATTR_INIT_SWITCH": "true",
+ "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_SEED": "0",
+ "SAI_SWITCH_ATTR_MAX_SYSTEM_CORES": "48",
+ "SAI_SWITCH_ATTR_PORT_STATE_CHANGE_NOTIFY": "0x55df0bc54550",
+ "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "14:7B:AC:3A:C9:7F",
+ "SAI_SWITCH_ATTR_SWITCH_HARDWARE_INFO": "8:48,52,58,48,48,46,48,0",
+ "SAI_SWITCH_ATTR_SWITCH_ID": "36",
+ "SAI_SWITCH_ATTR_SWITCH_SHUTDOWN_REQUEST_NOTIFY": "0x55df0bc54560",
+ "SAI_SWITCH_ATTR_SYSTEM_PORT_CONFIG_LIST": "{\"count\":304,\"list\":[{\"attached_core_index\":\"0\", etc
+```
+
+#### Test Case 3. System port creation.
+##### Test Objective
+Verify system ports are created on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB get all system ports referenced in config_db.json created on all hosts and ASICs.
+* Verify object creation and values of port attributes.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT:oid:0x5d0000000000e4": {
+ "expireat": 1550863898.617927,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "NULL": "NULL",
+ "SAI_SYSTEM_PORT_ATTR_CONFIG_INFO": "{\"attached_core_index\":\"0\",\"attached_core_port_index\":\"20\",\"attached_switch_id\":\"18\",\"num_voq\":\"8\",\"port_id\":\"596\",\"speed\":\"400000\"}"
+ }
+ },
+```
+
+
+#### Test Case 4. Local Ports
+##### Test Objective
+Verify local ports are created on all line cards.
+
+##### Test Steps
+* Verify ASIC_DB has host interface information for all local ports on all cards and ASICs.
+* Verify host interfaces exist on host CLI (ifconfig).
+* Verify interfaces exist in show interfaces on the linecard.
+
+##### Sample output
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF:oid:0xd00000000126b": {
+ "expireat": 1550863898.591804,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_HOSTIF_ATTR_NAME": "Ethernet0",
+ "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000002",
+ "SAI_HOSTIF_ATTR_OPER_STATUS": "false",
+ "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV"
+ }
+ },
+```
+```
+admin@dut1-imm2:~$ sudo ifconfig Ethernet0
+Ethernet0: flags=4098 mtu 9100
+ ether 14:7b:ac:3a:c9:7f txqueuelen 1000 (Ethernet)
+ RX packets 0 bytes 0 (0.0 B)
+ RX errors 0 dropped 0 overruns 0 frame 0
+ TX packets 0 bytes 0 (0.0 B)
+ TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
+```
+
+#### Test Case 5. Router Interface Creation
+##### Test Objective
+Verify router interfaces are created on all line cards and present in Chassis App Db.
+
+##### Test Steps
+* Verify router interface creation on local ports in ASIC DB.
+* PORT_ID should match system port table and traced back to config_db.json, mac and MTU should match as well.
+* Verify SYSTEM_INTERFACE table in Chassis AppDb (redis-dump -h -p 6380 -d 12 on supervisor).
+* Verify creation interfaces with different MTUs in config_db.json.
+* Verify creation of different subnet masks in config_db.json.
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample output
+ASIC:
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE:oid:0x60000000012b3": {
+ "expireat": 1550863898.6557322,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_ROUTER_INTERFACE_ATTR_MTU": "9100",
+ "SAI_ROUTER_INTERFACE_ATTR_PORT_ID": "oid:0x5d00000000015a",
+ "SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS": "14:7B:AC:3A:C9:7F",
+ "SAI_ROUTER_INTERFACE_ATTR_TYPE": "SAI_ROUTER_INTERFACE_TYPE_PORT",
+ "SAI_ROUTER_INTERFACE_ATTR_VIRTUAL_ROUTER_ID": "oid:0x3000000000027"
+ }
+ },
+```
+
+Chassis AppDB:
+
+```
+ "SYSTEM_INTERFACE|Slot7|Asic0|Ethernet24": {
+ "expireat": 1605628181.7629092,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "rif_id": "oid:0x19000600001499"
+ }
+ },
+```
+
+#### Test Case 6. Inband Configuration Type
+
+##### Test Objective
+Verify inband ports, neighbors, and routes are setup as in device configuration.
+
+##### Test Steps
+
+* Configure system in inband port mode.
+```
+"VOQ_INBAND_INTERFACE": {
+ "Inband3": {
+ "inband_type": "port"
+ },
+ "Inband3|133.133.133.4/32": {}
+},
+```
+* On each linecard, verify inband ports are present in ASICDB.
+* On each linecard, verify inband router interfaces are present in ASICDB
+* On supervisor card, verify inband router interfaces are present in Chassis App DB
+* On each linecard, verify permanent neighbors for all inband ports.
+* On each linecard, verify kernel routes for all inband ports.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 7. Local Neighbors
+
+##### Test Objective
+Verify neighbor entries are created on linecards for locally adjacent VMS.
+
+##### Test Steps
+* ARP/NDP should be resolved when BGP to adjacent VMs is established.
+* On local linecard, verify ASIC DB entries.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+* On local linecard, verify show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+* On local linecard, verify neighbor table in appDB.
+ * MAC address matches MAC of neighbor VM.
+* On supervisor card, verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB and the Chassis AppDB
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample output
+* Asic:
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"102.0.0.1\",\"rif\":\"oid:0x6000000001290\",\"switch_id\":\"oid:0x21000000000000\"}": {
+ "expireat": 1550863898.638045,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS": "6E:3A:88:CF:C6:2A",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX": "1074790407"
+ }
+ },
+
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP:oid:0x40000000012c2": {
+ "expireat": 1550863898.637784,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEXT_HOP_ATTR_IP": "102.0.0.1",
+ "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID": "oid:0x6000000001290",
+ "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_IP"
+ }
+ },
+```
+* AppDb:
+```
+ "NEIGH_TABLE:Ethernet8:102.0.0.1": {
+ "expireat": 1550863889.965874,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "family": "IPv4",
+ "neigh": "6e:3a:88:cf:c6:2a"
+ }
+ },
+```
+* Chassis AppDb:
+```
+ "SYSTEM_NEIGH|Slot7|Asic0|Ethernet8:102.0.0.1": {
+ "expireat": 1605628181.762964,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "encap_index": "1074790407",
+ "neigh": "6e:3a:88:cf:c6:2a"
+ }
+ },
+```
+
+#### Test Case 8. Remote Neighbors
+
+##### Test Objective
+Verify when local neighbors are established on a linecard, other linecards in the VoQ system will be programmed with neighbor entries.
+
+##### Test Steps
+* When local neighbors are established as in the Local Neighbor testcase, corresponding entries will be established on all other line cards. On each remote card, verify:
+* Verify ASIC DB entries on remote linecards.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+* Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+* Verify neighbor table in linecard appdb.
+* Verify static route is installed in kernel routing table with /32 (or /128 for IPv6) for neighbor entry.
+* Repeat with IPv4, IPv6, dual-stack.
+
+##### Sample Output
+* Asic DB
+```
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY:{\"ip\":\"101.0.0.1\",\"rif\":\"oid:0x60000000012b3\",\"switch_id\":\"oid:0x21000000000000\"}": {
+ "expireat": 1550863898.651915,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS": "4E:49:E4:62:ED:88",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX": "true",
+ "SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX": "1074790407",
+ "SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL": "false"
+ }
+ },
+
+ "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP:oid:0x40000000012c0": {
+ "expireat": 1550863898.6276,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "SAI_NEXT_HOP_ATTR_IP": "101.0.0.1",
+ "SAI_NEXT_HOP_ATTR_ROUTER_INTERFACE_ID": "oid:0x60000000012b3",
+ "SAI_NEXT_HOP_ATTR_TYPE": "SAI_NEXT_HOP_TYPE_IP"
+ }
+ },
+```
+* App DB
+```
+ "NEIGH_TABLE:Inband18:101.0.0.1": {
+ "expireat": 1550863889.96545,
+ "ttl": -0.001,
+ "type": "hash",
+ "value": {
+ "family": "IPv4",
+ "neigh": "14:7b:ac:3a:c9:7f"
+ }
+ },
+```
+* Chassis App DB
+```
+"SYSTEM_NEIGH|Slot8|Asic0|Ethernet23:101.0.0.1": {
+"expireat": 1605628181.7629762,
+"ttl": -0.001,
+"type": "hash",
+"value": {
+ "encap_index": "1074790407",
+ "neigh": "4e:49:e4:62:ed:88"
+}
+},
+```
+
+* Host
+```
+show ip route
+K>* 101.0.0.1/32 [0/0] is directly connected, Inband18, 20:55:26
+
+netstat -rn
+101.0.0.1 0.0.0.0 255.255.255.255 UH 0 0 0 Inband18
+```
+
+
+
+## Neighbor Lifecycle
+
+### Preconditions
+
+In order to verify neighbor behaviors, BGP sessions on the DUT and attached VMs will be temporarily shutdown. This
+will allow the tests to validate the various table deletes before the entries are recreated.
+
+
+### Test cases
+
+#### Test Case 1. Neighbor established from a remote card.
+
+##### Test Objective
+Verify local neighbor behavior is correct when ARP/NDP request is triggered by a packet from a remote line card.
+
+##### Test Steps
+* Send ping from linecard 1 to a VM attached to linecard 2. This will establish a local neighbor on linecard 2 and a remote neighbor on linecard 1.
+
+* On linecard 2:
+ * Verify ASIC DB entries on local linecard.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+ * Verify on local linecard CLI, show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+ * Verify neighbor table in linecard appDB.
+ * MAC address matches MAC of neighbor VM.
+
+* On supervisor card:
+ * Verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB and the Chassis AppDB
+
+* On linecard 1:
+ * Verify ASIC DB entries as a remote neighbor.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+ * Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+ * Verify neighbor table in linecard appdb.
+ * Verify static route in kernel with /32 for entry.
+
+#### Test Case 2. Clear ARP, single address.
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when a single neighbor adjacency is cleared.
+##### Test Steps
+* On local linecard:
+ * Clear single address with command: `ip neigh flush to "addr"`.
+ * Verify ARP/NDP entry removed from CLI.
+ * Verify table entries in ASIC, AppDb are removed for only the cleared address.
+* On Supervisor card:
+ * Verify Chassis App DB entry is removed for only the cleared address.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed.
+ * Verify kernel route for cleared address is deleted.
+* Restart traffic, verify relearn.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 3. Clear ARP table via sonic command.
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when the entire neighbor table is cleared.
+##### Test Steps
+* On local linecard:
+ * Issue `sonic-clear arp` command, and verify all addresses are removed and kernel routes are deleted on all hosts and ASICs.
+ * Verify ARP/NDP entries are removed from CLI.
+ * Verify table entries in ASIC, AppDb are removed for all cleared addresses.
+* On Supervisor card:
+ * Verify Chassis App DB entry is removed for only the cleared address. Entries for addresses on other line cards
+ should still be present.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed for cleared addresses.
+ * Verify kernel routes for cleared address are deleted.
+* Send full mesh traffic and verify relearn and DB.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 4. Front panel port link flap
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly deleted when the front panel port flaps.
+##### Test Steps
+* Admin down interface on fanout to cause LOS on DUT.
+* On local linecard:
+ * Verify ARP/NDP entries are removed from CLI for neighbors on down port.
+ * Verify table entries in ASIC, AppDb are removed for addresses on down port.
+* On Supervisor card:
+ * Verify Chassis App DB entry is removed for only the cleared address. Entries for addresses on other line cards
+ should still be present.
+* On remote linecards:
+ * Verify table entries in ASICDB, APPDB, and host ARP table are removed for cleared addresses.
+ * Verify kernel routes for cleared address are deleted.
+* Admin interface up, verify recreation after restarting traffic.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+#### Test Case 5. Gratuitous ARP - Previously Known IP
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when receiving an unsolicited ARP packet for a previously known IP address.
+##### Test Steps
+* Clear ARP table on a line card.
+* Send unsolicited ARP packet into linecard for an IP that was known on that card and is now stale.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, host arp table are recreated.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct for the relearned address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in ASICDB, APPDB, and host ARP table are recreated.
+ * Verify kernel routes in remote hosts are still present.
+* Verify that packets can be sent from local and remote linecards to learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 6. Gratuitous ARP - Known IP - Mac change
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when an unsolicited ARP packet changes the MAC address of learned neighbor.
+##### Test Steps
+* Send unsolicited ARP packet into DUT for an IP known by DUT with a different MAC address for the neighbor.
+* Change the MAC address of the neighbor VM.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, and host ARP table are updated with new MAC.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct with the updated MAC address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in APPDB, and host ARP table are still present with inband MAC address
+ * Verify ASIC DB is updated with new MAC.
+ * Verify kernel route in remote hosts are still present to inband port.
+* Verify that packets can be sent from local and remote linecards to learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 7. ARP Request/Reply - Mac change
+##### Test Objective
+Verify tables, databases, and kernel routes are correctly updated when the MAC address of a neighbor changes and is updated via request/reply exchange.
+##### Test Steps
+* Change the MAC address on a remote host that is already present in the ARP table.
+* Without clearing the entry in the DUT, allow the existing entry to time out and the new reply to have the new MAC address.
+* On local linecard:
+ * Verify table entries in local ASIC, APP, and host ARP table are updated with new MAC.
+* On supervisor card:
+ * Verify Chassis App DB entry is correct with the updated MAC address.
+* On remote linecards:
+ * Verify table entries in remote hosts/ASICs in APPDB, and host ARP table are still present with inband MAC address
+ * Verify ASIC DB is updated with new MAC.
+ * Verify kernel route in remote hosts are still present to inband port.
+* Verify that packets can be sent from local and remote linecards to the learned address.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 8. Disruptive Events
+##### Test Objective
+Verify port, router interface, and neighbor recovery after disruptive events.
+##### Test Steps
+* After the following events:
+ * chassis power cycle,
+ * supervisor reboot,
+* Verify, as in the previous test cases:
+ * Local neighbor learning,
+ * remote neighbor learning and route creation
+ * timeout and clear of neighbors
+
+
+## Router Interface Lifecycle
+
+#### Test Case 1. IP Interface Creation
+##### Test Objective
+Verify Chassis App DB is updated with new interface entry when a new IP Interface is added.
+##### Test Steps
+* Add IP to a previously unconfigured port by adding minigraph configuration to that linecard.
+* Reload the new minigraph and line card.
+* On the line card:
+ * Verify address state in CLI.
+ * Verify interface in ASIC DB
+* On the supervisor card:
+ * Verify the interface is present in the SYSTEM_INTERFACE table of the Chassis App DB.
+ * Verify the OID is unique, and matches the router interface ID in the ASIC DB.
+ * Verify the slot and port are correct.
+* Verify bidirectional traffic to an attached host on the newly created port from local and remote linecards.
+* Repeat with IPv4, IPv6, dual-stack.
+
+#### Test Case 2. Interface Deletion
+##### Test Objective
+Verify Chassis App DB is updated with new interface entry when an IP interface is removed from a port.
+##### Test Steps
+* Remove IP configuration from a previously configured port by removing the minigraph configuration for that port
+on the linecard minigraph.
+* Reload the new minigraph and line card.
+* On the line card:
+ * Verify address is removed from CLI.
+ * Verify interface is removed from ASIC DB.
+* On the supervisor card:
+ * Verify the interface is removed from the SYSTEM_INTERFACE table of the Chassis App DB.
+* Verify bidirectional traffic to attached host on the port from local and remote ASICs is dropped.
+* Repeat with IPv4, IPv6, dual-stack.
+
+
+## Host IP Forwarding
+
+
+### Configuration
+
+Please reference the [T2 topology](https://github.com/Azure/sonic-mgmt/pull/2638/) files topo_t2.yml and testbed-t2.png for network topology and sample IP addresses. The addresses and VMS below are taken from that example topology.
+
+VMs attached to line card 1 and line card 2 will be used for this test.
+DUT Port A&B are on line card 1, D is on line card 2.
+```
+ ---------- DUT ----------
+ |--- LC1 ---|--- LC2 ---|
+VM01T3 -------------|A | |
+ | F0|F1 D|------------- VM01T1
+VM02T3 -------------|B LB1 | LB2 |
+```
+
+_VM01T3_
+* Loopbacks:
+ * ipv4: `100.1.0.1/32`
+ * ipv6: `2064:100::1/128`
+* Ethernet:
+ * ipv4: `10.0.0.1/31`
+ * ipv6: `FC00::2/126`
+
+
+_VM02T3_
+* Loopbacks:
+ * ipv4: `100.1.0.2/32`
+ * ipv6: `2064:100::2/128`
+* Ethernet:
+ * ipv4: `10.0.0.3/31`
+ * ipv6: `FC00::6/126`
+
+_VM01T1_
+* Loopbacks:
+ * ipv4: `100.1.0.33/32`
+ * ipv6: `2064:100::21/128`
+* Ethernet:
+ * ipv4: `10.0.0.65/31`
+ * ipv6: `FC00::82/126`
+
+_DUT_
+
+* Linecard 1
+ * Port A (to VM01T3)
+ * `10.0.0.0/31`
+ * `FC00::1/126`
+ * Port B (to VM02T3)
+ * `10.0.0.2/31`
+ * `FC00::5/126`
+ * Inband IP ( Port F0)
+ * `133.133.133.1`
+ * `2064:133::1`
+ * Loopback LB1
+ * `11.1.0.1/32`
+ * `2064:111::1/128`
+* Linecard 2
+ * Port D (to VM01T1)
+ * `10.0.0.64/31`
+ * `FC00::81/126`
+ * Inband IP (Port F1)
+ * `133.133.133.5`
+ * `2064:133::5`
+ * Loopback LB2
+ * `11.1.0.2/32`
+ * `2064:111::2/128`
+
+#### Test Case 1. Table Verification
+##### Test Objective
+Verify the kernel route table is correct based on the topology.
+##### Test Steps
+* Verify routes for local addresses on both line cards are directly connected.
+* Verify routes for local inband interfaces are directly connected.
+* Verify BGP established between line cards.
+* Verify routes of remote linecard inband interfaces are connected via local linecard inband interface.
+* Verify IP interface addresses on remote network ports have a next hop of their inband IP. On linecard 1, route 10.0.0.64/31 next hop is 133.133.133.5.
+* Verify all learned prefixes from neighbors have their neighbors as next hop.
+* Repeat for IPv4 only, IPv6 only, dual-stack.
+
+#### Test Case 2. Router Interface to Router Interface
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to local line card interfaces.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP interface A to DUT IP Interface B. (10.0.0.0 to 10.0.0.2)
+ * DUT IP interface A to DUT IP Interface D. (10.0.0.0 to 10.0.0.64)
+* On linecard 2, send ping from:
+ * DUT IP interface D to DUT IP Interface A.
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 3. Router Interface to neighbor addresses
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to neighbor addresses.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP Interface on port A to directly connected neighbor address. (10.0.0.0 to 10.0.0.1)
+ * DUT IP Interface A to neighbor address on port B. (10.0.0.0 to 10.0.0.3)
+ * DUT IP Interface A to neighbor address on port D. (10.0.0.0 to 10.0.0.65)
+* On linecard 2, send ping from:
+ * DUT IP interface D to neighbor address on port A. (10.0.0.64 to 10.0.0.1)
+* On Router 01T3, send ping from:
+ * Router IP interface to DUT address on port A. (10.0.0.1 to 10.0.0.0)
+ * Router IP interface to DUT address on port D. (10.0.0.1 to 10.0.0.64)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 4. Router Interface to routed addresses.
+##### Test Objective
+Verify Host IP forwarding for IPv4 and IPv6 for various packet sizes and ttls to learned route addresses.
+##### Test Steps
+* On linecard 1, send ping from:
+ * DUT IP Interface A to routed loopback address from router 01T3. (10.0.0.0 to 100.1.0.1)
+ * DUT IP Interface A to routed loopback address from router 02T3. (10.0.0.0 to 100.1.0.2)
+ * DUT IP Interface A to routed loopback address from router 01T1. (10.0.0.0 to 100.1.0.33)
+* On linecard 2, send ping from:
+ * DUT IP interface D to routed loopback address from router 01T3. (10.0.0.64 to 100.1.0.1)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT address on port A. (100.1.0.1 to 10.0.0.0)
+ * Router loopback interface to DUT address on port D. (100.1.0.1 to 10.0.0.64)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 5. Inband Router Interface connectivity
+##### Test Objective
+Verify IP connectivity over inband interfaces.
+##### Test Steps
+* On linecard 1 send ping from:
+ * Inband interface F0 to inband interface F1 (133.133.133.1 to 133.133.133.5)
+ * Inband interface F0 to interface D (133.133.133.1 to 10.0.0.64)
+ * Inband interface F0 to neighbor on port A (133.133.133.1 to 10.0.0.1)
+ * Inband interface F0 to neighbor on port D (133.133.133.1 to 10.0.0.65)
+ * Inband interface F0 to routed loopback from router 01T3 (133.133.133.1 to 100.1.0.1)
+ * Inband interface F0 to routed loopback from router 01T1 (133.133.133.1 to 100.1.0.33)
+* On linecard 2, send ping from:
+ * Inband interface F1 to inband interface F0 (133.133.133.5 to 133.133.133.1)
+ * Inband interface F1 to interface D (133.133.133.5 to 10.0.0.64)
+ * Inband interface F1 to neighbor on port A (133.133.133.5 to 10.0.0.1)
+ * Inband interface F1 to neighbor on port D (133.133.133.5 to 10.0.0.65)
+ * Inband interface F1 to routed loopback from router 01T3 (133.133.133.5 to 100.1.0.1)
+ * Inband interface F1 to routed loopback from router 01T1 (133.133.133.5 to 100.1.0.33)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT inband address on linecard 1. (100.1.0.1 to 133.133.133.1)
+ * Router loopback interface to DUT inband address on linecard 2. (100.1.0.1 to 133.133.133.5)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 6. Line card loopback interface connectivity
+##### Test Objective
+Verify IP Connectivity to DUT loopback addresses.
+##### Test Steps
+* On linecard 1 send ping from:
+ * Loopback to IP interface of port D (11.1.0.1 to 10.0.0.64)
+ * Loopback to neighbor on port D (11.1.0.1 to 10.0.0.65)
+ * Loopback to routed loopback address (11.1.0.1 to 100.1.0.1)
+ * Loopback to routed loopback address (11.1.0.1 to 100.1.0.33)
+* On Router 01T3, send ping from:
+ * Router loopback interface to DUT loopback address on linecard 1. (100.1.0.1 to 11.1.0.1)
+ * Router loopback interface to DUT loopback address on linecard 2. (100.1.0.1 to 11.1.0.2)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 7. End to End traffic.
+##### Test Objective
+Verify end to end routing IPv4/v6, packet sizes, ttl(0,1,2,255)
+##### Test Steps
+* On Router 1, send ping from:
+ * End to end port A to B, ports on same linecard. (100.1.0.1 to 100.1.0.2)
+ * End to end port A to D, ports across multiple linecards. (100.1.0.1 to 100.1.0.33)
+* Repeat for TTL 0,1,2,255
+* Repeat for 64, 1500, 9100B packets
+* Repeat for IPv6
+
+#### Test Case 8. Front Panel port link flap
+##### Test Objective
+Traffic to Sonic host interfaces recovers after the front panel port flaps.
+##### Test Steps
+* Admin down interface on fanout connected to DUT port A to cause LOS on DUT.
+* On linecard 1 verify ping is successful from:
+ * DUT IP Interface B to DUT Interface D
+ * DUT Neighbor IP B to DUT Neighbor IP D
+* On Router 02T3, verify ping is successful from Router Interface to DUT IP Interface B and D.
+* On linecard 1, verify ping fails from:
+ * DUT IP Interface A to DUT IP interface B and D.
+ * DUT IP Interface A to attached neighbor.
+* On Router 01T3, verify ping fails to all DUT addresses.
+* On fanout switch, admin up the downed interface.
+* Validate all traffic flows are correct as in test cases 2-7.
+* Retry traffic with TTL 0,1,2,255
+* Retry traffic with 64, 1500, 9100B packets
+* Retry traffic with IPv6
+
+## VLAN Inband Mode
+
+#### Test Case 1. Inband VLAN mode configuration.
+##### Test Objective
+Verify system initialization in Inband VLAN mode.
+##### Test Steps
+* Verify vlan inband interface is used when in this mode.
+* Verify correct VLAN ID is used on all nodes.
+* On each linecard, verify inband VLAN router interfaces are present in ASICDB
+* On supervisor card, verify inband VLAN router interfaces are present in Chassis App DB
+
+#### Test Case 2. Inband VLAN neighbors
+##### Test Objective
+Verify neighbor adjacency as in [arp](#arp). Inband port will be replaced with VLAN interface as neighbor interface.
+##### Test Steps
+* Repeat tests for:
+ * Local neighbor learning,
+ * remote neighbor learning and route creation
+ * timeout and clearing of neighbors
+
+#### Test Case 3. Inband VLAN host connectivity
+##### Test Objective
+Verify host reachability as in [Host IP Connectivity](#ipfwd). VLAN interface will replace inband port as next hop.
+##### Test Steps
+* Repeat traffic tests for:
+ * router interface to remote ports,
+ * router interface to local and remote neighbors,
+ * router interface to learned routes.
+ * inband interface to all addresses.
+ * DUT loopback interface to all addresses.
+
+#### Test Case 4. Mode Switch.
+##### Test Objective
+Verify VoQ system can be switched between modes when configuration is replaced.
+##### Test Steps
+* Regenerate configuration of VoQ system, switching device from inband port to inband VLAN.
+* Reboot the chassis.
+* Verify system is stable in new mode.
+* Restore to inband port mode.
diff --git a/docs/testplan/Img/Failover_convergence.png b/docs/testplan/Img/Failover_convergence.png
new file mode 100644
index 00000000000..9d710dfd3e4
Binary files /dev/null and b/docs/testplan/Img/Failover_convergence.png differ
diff --git a/docs/testplan/Img/Multi_link_failure.png b/docs/testplan/Img/Multi_link_failure.png
new file mode 100644
index 00000000000..c560171b644
Binary files /dev/null and b/docs/testplan/Img/Multi_link_failure.png differ
diff --git a/docs/testplan/Img/Multiple_Remote_Link_Failure.png b/docs/testplan/Img/Multiple_Remote_Link_Failure.png
new file mode 100644
index 00000000000..c6c0583dd42
Binary files /dev/null and b/docs/testplan/Img/Multiple_Remote_Link_Failure.png differ
diff --git a/docs/testplan/Img/RIB-IN-Convergence_Topology.png b/docs/testplan/Img/RIB-IN-Convergence_Topology.png
new file mode 100644
index 00000000000..d822b2a43cf
Binary files /dev/null and b/docs/testplan/Img/RIB-IN-Convergence_Topology.png differ
diff --git a/docs/testplan/Img/RIB-IN_Capacity_Test.png b/docs/testplan/Img/RIB-IN_Capacity_Test.png
new file mode 100644
index 00000000000..133adb324c9
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_Capacity_Test.png differ
diff --git a/docs/testplan/Img/RIB-IN_Convergence_graph.png b/docs/testplan/Img/RIB-IN_Convergence_graph.png
new file mode 100644
index 00000000000..6c57bcad7b0
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_Convergence_graph.png differ
diff --git a/docs/testplan/Img/RIB-IN_convergence_test.png b/docs/testplan/Img/RIB-IN_convergence_test.png
new file mode 100644
index 00000000000..a0237853e7e
Binary files /dev/null and b/docs/testplan/Img/RIB-IN_convergence_test.png differ
diff --git a/docs/testplan/Img/Single_DUT_Topology.png b/docs/testplan/Img/Single_DUT_Topology.png
new file mode 100644
index 00000000000..d27866ebfb0
Binary files /dev/null and b/docs/testplan/Img/Single_DUT_Topology.png differ
diff --git a/docs/testplan/Img/Single_Link_Failure.png b/docs/testplan/Img/Single_Link_Failure.png
new file mode 100644
index 00000000000..7cfc87ea038
Binary files /dev/null and b/docs/testplan/Img/Single_Link_Failure.png differ
diff --git a/docs/testplan/Img/Single_Remote_Link_Failure.png b/docs/testplan/Img/Single_Remote_Link_Failure.png
new file mode 100644
index 00000000000..1303c68b7d1
Binary files /dev/null and b/docs/testplan/Img/Single_Remote_Link_Failure.png differ
diff --git a/docs/testplan/Img/Switch_acting_as_leaf.png b/docs/testplan/Img/Switch_acting_as_leaf.png
new file mode 100644
index 00000000000..4461e3cf5cd
Binary files /dev/null and b/docs/testplan/Img/Switch_acting_as_leaf.png differ
diff --git a/docs/testplan/Img/Switch_as_ToR.png b/docs/testplan/Img/Switch_as_ToR.png
new file mode 100644
index 00000000000..dc15218b7e7
Binary files /dev/null and b/docs/testplan/Img/Switch_as_ToR.png differ
diff --git a/docs/testplan/Img/anvl-test-methodology.png b/docs/testplan/Img/anvl-test-methodology.png
new file mode 100644
index 00000000000..a48b9c8be4a
Binary files /dev/null and b/docs/testplan/Img/anvl-test-methodology.png differ
diff --git a/docs/testplan/Img/anvl-testbed.png b/docs/testplan/Img/anvl-testbed.png
new file mode 100644
index 00000000000..6ad133b43cb
Binary files /dev/null and b/docs/testplan/Img/anvl-testbed.png differ
diff --git a/docs/testplan/SNMP-memory-test-plan.md b/docs/testplan/SNMP-memory-test-plan.md
new file mode 100644
index 00000000000..c6b9e51b14c
--- /dev/null
+++ b/docs/testplan/SNMP-memory-test-plan.md
@@ -0,0 +1,31 @@
+# SNMP-memory test plan
+
+* [Overview](#Overview)
+ * [Scope](#Scope)
+ * [Testbed](#Testbed)
+* [Setup configuration](#Setup%20configuration)
+* [Test cases](#Test%20cases)
+
+## Overview
+The purpose is to test that SNMP memory MIB objects are functioning properly on the SONIC switch DUT.
+
+### Scope
+The test is targeting a running SONIC system with fully functioning configuration. The purpose of the test is not to test specific API, but functional testing of SNMP on SONIC system.
+
+### Testbed
+The test will run on any testbeds.
+
+## Setup configuration
+This test requires no specific setup.
+
+## Test
+Retrieve facts for a device using SNMP, and compare it to system values.
+
+## Test cases
+### Test case test_snmp_memory
+#### Test steps
+* Retrieve facts for a device using SNMP
+* Get expected values for a device from system
+* Compare that facts received by SNMP are equal to values received from system
+* Run script that should slightly load RAM memory
+* Retrieve facts for a device using SNMP, verify that the SNMP value differs from system data by less than 4%
diff --git a/docs/testplan/SNMP-v2mib-test-plan.md b/docs/testplan/SNMP-v2mib-test-plan.md
new file mode 100644
index 00000000000..d04fc4ddbdd
--- /dev/null
+++ b/docs/testplan/SNMP-v2mib-test-plan.md
@@ -0,0 +1,29 @@
+# SNMP-v2mib test plan
+
+* [Overview](#Overview)
+ * [Scope](#Scope)
+ * [Testbed](#Testbed)
+* [Setup configuration](#Setup%20configuration)
+* [Test cases](#Test%20cases)
+
+## Overview
+The purpose is to test that SNMPv2-MIB objects are functioning properly on the SONIC switch DUT.
+
+### Scope
+The test is targeting a running SONIC system with fully functioning configuration. The purpose of the test is not to test specific API, but functional testing of SNMP on SONIC system.
+
+### Testbed
+The test will run on any testbeds.
+
+## Setup configuration
+This test requires no specific setup.
+
+## Test
+Retrieve facts for a device using SNMP, and compare it to system values.
+
+## Test cases
+### Test case test_snmp_v2mib
+#### Test steps
+* Retrieve facts for a device using SNMP
+* Get expected values for a device from system.
+* Compare that facts received by SNMP are equal to values received from system.
diff --git a/docs/testplan/console/console_test_hld.md b/docs/testplan/console/console_test_hld.md
new file mode 100644
index 00000000000..bfa41150bad
--- /dev/null
+++ b/docs/testplan/console/console_test_hld.md
@@ -0,0 +1,124 @@
+# Console Switch Test Plan
+
+- [Console Switch Test Plan](#console-switch-test-plan)
+ * [1 Background](#1-background)
+ * [2 Scope](#2-scope)
+ * [3 Testbed Setup](#3-testbed-setup)
+ + [3.1 DUT Wiring](#31-dut-wiring)
+ + [3.2 Console Switch Wiring (Loopback Mode)](#32-console-switch-wiring--loopback-mode-)
+ - [3.2.1 General Test Wiring](#321-general-test-wiring)
+ - [3.2.2 Stress Test Wiring](#322-stress-test-wiring)
+ * [4 Test Cases](#4-test-cases)
+ + [4.1 Driver Test](#41-driver-test)
+ + [4.2 udev Rule Test](#42-udev-rule-test)
+ + [4.3 Reverse SSH](#43-reverse-ssh)
+ + [4.4 Loopback Test](#44-loopback-test)
+ + [4.5 Stress Test](#45-stress-test)
+
+## 1 Background
+
+This project introduces the concept of Console Switch to provide pluggable console management function in SONiC just like regular Terminal Server. Unlike the typical terminal server, the console switch is a simple box without any management function, it will provide multiple RS232(or RJ45) ports with USB serial converts. Therefore, a SONiC box is required to connect to the console switch with a USB link and all management function will be in the SONiC box.
+
+For more design detail, you can refer to this HLD: [SONiC Console Switch High Level Design](https://github.com/Azure/SONiC/blob/master/doc/console/SONiC-Console-Switch-High-Level-Design.md)
+
+## 2 Scope
+
+The scope of this test plan is to verify correct end-to-end operation of a console switch configuration and usage. This includes control plane testing to verify correct state on the device under test (DUT) and data plane testing to verify correct serial data transfer.
+
+There are not many hardware-level constraints and no SAI dependency either. In a sense, any SONiC box with SSH accessibility can become a terminal server with a console switch. So the main focus will be on the software layer:
+
+- Console Switch driver
+- udev Rules
+- Console Switch Configuration
+- Reverse SSH
+
+## 3 Testbed Setup
+
+### 3.1 DUT Wiring
+
+The console switch testbed will reuse the current SONiC testbed setup. In addition, a console switch will be attached to the DUT via a USB link.
+
+Below is the wiring diagram:
+
+
+
+There are two USB ports in console switch's panel, the master port will be used to connect the SONiC box.
+
+For different test purposes, the console switch ports need to apply different wiring patterns. Since the virtual serial bridge [RFC 2217](https://tools.ietf.org/html/rfc2217.html) needs both server and client installed on the two sides, the Console Switch test plan will only introduce the Loopback mode testbed.
+
+### 3.2 Console Switch Wiring (Loopback Mode)
+
+The Console Switch has 48 RS232 (or RJ45) ports divided into three blocks on the panel.
+
+A Loopback Module is a special serial link which connects its own `TXD<->RXD`, `RTS<->CTS`, `GND<->GND`. Below is a sample:
+
+
+
+#### 3.2.1 General Test Wiring
+
+The wiring pattern is shown below:
+
+
+
+- Port 1-16 will attach Loopback Module
+- Port 17-18 connect to port 19-20
+- Port 21-22 connect to port 27-28
+- Port 23-24 connect to port 25-26
+- Port 29-30 connect to port 35-36
+- Port 31-32 connect to port 33-34
+- Port 37-48 will be reserved for future virtual serial bridge testing
+
+#### 3.2.2 Stress Test Wiring
+
+The wiring pattern is shown below:
+
+
+
+- Port 1-48 will attach Loopback Module
+
+## 4 Test Cases
+
+### 4.1 Driver Test
+
+**Apply General Test Wiring**
+
+| Case | Objective | Test Setup | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Availability | Verify ttyUSB(0-47) are presented in DUT | - | All 48 ttyUSB* devices are presented in DUT | - |
+
+### 4.2 udev Rule Test
+
+**Apply General Test Wiring**
+
+| Case | Objective | Test Setup | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Port Mapping | Verify ttyUSB(0-47) are mapped to C0-(1-48) | - | All 48 C0-* devices are presented in DUT | - |
+| Port Shifting Prevent | Verify C0-(33-48) are still remained after unbind some ttyUSB devices | Add temporary udev rule to mask ports 1-8, then unbind/bind the root USB hub(1-1.1) | ttyUSB(0-39) and C0-(9-48) devices are presented in DUT | - |
+
+### 4.3 Reverse SSH
+
+**Apply General Test Wiring**
+
+| Case | Objective | Test Setup | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Connectivity | Verify serial session is available after connect DUT via reverse SSH | Connect DUT serial port via reverse SSH | The session is up and can exit by sending ctrl-A + ctrl-X. The DUT show that port is busy during session and idle after exited. | - |
+| Force Interrupt | Verify active serial session can be shut by DUT | Connect DUT serial port `A` via reverse SSH then connect to DUT and clear port `A` | The session ended within 5s and the line state is idle | - |
+
+### 4.4 Loopback Test
+
+**Apply General Test Wiring**
+
+| Case | Objective | Test Setup | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Echo | Verify data can go out through the console switch and come back through the console switch | Send random string to ports 1-16 | - | All sent data echoed back |
+| Ping-Pong | Verify data can go out through the console switch and come back through the console switch | Listening a serial port `A` and send "pong" after "ping" came, then send "ping" to opposite port `B` (Port Range: 17-36) | - | Got "pong" back from `B` |
+| File Transfer | Verify file can be transferred through the console switch | Ensure there is no `config_db.json` under `\var\tmp` directory. Forward `tcp:localhost:22` to port `A`, send file `\etc\sonic\config_db.json` to opposite port `B` and store it in `\var\tmp\config_db.json` | - | The md5 hash between `\etc\sonic\config_db.json` and `\var\tmp\config_db.json` are same |
+
+### 4.5 Stress Test
+
+**Apply Stress Test Wiring**
+
+| Case | Objective | Test Setup | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Echo(30, 9600) | Verify the DUT CPU load not exceed the expectation | Send random string to ports 1-48 continually within 60s; The first parameter is the random string size in a single transfer, the second parameter is the BAUD rate | Avg CPU < 75%; Actual Speed > 800B/s | All sent data echoed back |
+| Echo(3000, 9600) | Same above | Same above | Avg CPU < 65%; Actual Speed > 900B/s | All sent data echoed back |
\ No newline at end of file
diff --git a/docs/testplan/console/img/console_switch_wiring_general.png b/docs/testplan/console/img/console_switch_wiring_general.png
new file mode 100644
index 00000000000..ef4415a8fba
Binary files /dev/null and b/docs/testplan/console/img/console_switch_wiring_general.png differ
diff --git a/docs/testplan/console/img/console_switch_wiring_stress.png b/docs/testplan/console/img/console_switch_wiring_stress.png
new file mode 100644
index 00000000000..8369cae85f8
Binary files /dev/null and b/docs/testplan/console/img/console_switch_wiring_stress.png differ
diff --git a/docs/testplan/console/img/dut_wiring.png b/docs/testplan/console/img/dut_wiring.png
new file mode 100644
index 00000000000..43bf590d13c
Binary files /dev/null and b/docs/testplan/console/img/dut_wiring.png differ
diff --git a/docs/testplan/console/img/loopback_module.png b/docs/testplan/console/img/loopback_module.png
new file mode 100644
index 00000000000..6c4028a95cc
Binary files /dev/null and b/docs/testplan/console/img/loopback_module.png differ
diff --git a/docs/testplan/dual_tor/dual_tor_orch_test_plan.md b/docs/testplan/dual_tor/dual_tor_orch_test_plan.md
new file mode 100644
index 00000000000..f0198014efa
--- /dev/null
+++ b/docs/testplan/dual_tor/dual_tor_orch_test_plan.md
@@ -0,0 +1,123 @@
+# Dual ToR Orchagent Test Plan
+
+### Scope
+
+The scope of this test plan is to verify correct tunnel, ECMP, and ACL behavior. This is a continuation of dual_tor_test_hld.md; please refer to that document for more details on the topology, etc.
+
+Standby ToR refers to a ToR scenario in which the packet is destined to/from server where the Mux is in Standby Mode.
+Active ToR refers to a ToR scenario in which the packet is destined to/from server where the Mux is in Active Mode.
+
+### Testbed Setup
+
+The test can be executed on a single ToR testbed with proposed configurations that simulate the active/standby mux behavior
+
+### Config command:
+
+Example mux cable config in Config DB
+```
+{
+ "MUX_CABLE":{
+ "Ethernet4":{
+ "server_ipv4":"192.168.0.100/32",
+ "server_ipv6":"fc02:1000::100/80"
+ }
+}
+```
+
+The following command can be used to set a mux port to standby/active via swssconfig
+
+```
+muxstandby.json
+[
+ {
+ "MUX_CABLE_TABLE:Ethernet4" : {
+ "state":"standby"
+ },
+ "OP": "SET"
+ }
+]
+
+docker exec swss sh -c \"swssconfig /muxstandby.json\"
+```
+
+Neighbor add:
+
+```
+ip -4 neigh add 192.168.0.2 lladdr 00:00:11:22:33:44 dev Vlan1000
+```
+
+Neighbor flush:
+
+```
+ip -4 neigh del 192.168.0.2 dev Vlan1000
+```
+
+Loopback (Peer Switch) route add
+
+```
+ip route add 1.1.1.1 nexthop via 10.0.0.57 nexthop via 10.0.0.59 nexthop via 10.0.0.61
+```
+
+## Test Cases
+
+1. T1 -> Standby ToR
+
+ Send traffic of varying tuple destined to the server under the standby mux. The following are the various steps; those which have to be executed in a sequence are grouped together.
+
+ | Step | Goal | Expected results |
+ |-|-|-|
+ | All ports to T1s are up; Loopback route configured | ECMP hashing | Verify tunnel traffic to Active ToR is distributed equally across nexthops; Verify no traffic is forwarded to downlink in case of standby mux |
+ | Shutdown one uplink to T1 | ECMP hashing/CRM | Verify traffic is shifted to the active links and no traffic drop observed; Verify CRM that no new nexthop created|
+ | Bring back the uplink to T1 | ECMP hashing/CRM | Verify traffic is now equally distributed; Verify CRM that no new nexthop created |
+ ||||
+ | Shutdown one BGP session to T1 | ECMP hashing/CRM | Verify traffic is shifted to the active links and no traffic drop observed; Verify CRM that no new nexthop created|
+ | Bring back BGP session to T1 | ECMP hashing/CRM | Verify traffic is now equally distributed; Verify CRM that no new entries created |
+ ||||
+ | Server Neighbor entry flushed/relearnt | Standby Forwarding | Verify no impact to tunnel traffic and no traffic fwded to neighbor directly; Verify CRM for neighbor |
+ ||||
+ | Remove Loopback route | ECMP hashing | Verify traffic is equally distributed via default route|
+ | Re-add Loopback route | ECMP hashing | Verify traffic is equally distributed via loopback route|
+
+
+2. Server -> Standby ToR
+
+ For the CRM tests, it is expected to read the values before and after test and compare the resource count for usage/leaks.
+
+ | Step | Goal | Expected results |
+ |-|-|-|
+ | Mux state in Standby | ACL | Verify traffic is dropped by ACL rule and drop counters incremented |
+ | Simulate Mux state change to active | ACL/CRM | Verify traffic is not dropped by ACL and fwd-ed to uplinks; Verify CRM show and no nexthop objects are stale |
+ | Simulate Mux state change to standby | ACL/CRM | Verify traffic is dropped by ACL; Verify CRM show and no nexthop objects are stale |
+
+3. T1 -> Active ToR
+
+ Send traffic to server under active mux.
+
+ | Step | Goal | Expected results |
+ |-|-|-|
+ | Neighbor learnt | Forwarding | Verify no tunnel traffic for Active mux. All traffic to server should be directly forwarded; Verify CRM for neighbor |
+ | Neighbor flushed | Drop | Verify no tunnel traffic but packets are dropped; Verify CRM for neighbor |
+ | Neighbor re-learnt | Forwarding | Verify no tunnel traffic and packets are fwded |
+
+4. T1 -> Tor (IPinIP packet)
+
+ Send IPinIP encapsulated packet. Configure some ports in Active, some ports in Standby mode
+
+ | Step | Goal | Expected results |
+ |-|-|-|
+ | Outer IP as loopback, Inner Dst IP as Active Server IP | Decap | Verify traffic is decapsulated and fwded to Server port |
+ ||||
+ | Outer IP as loopback, Inner Dst IP as Standby Server IP | Decap | Verify traffic is not fwded to Server port or re-encapsulated to T1s |
+
+5. Stress test
+
+ Continuous mux state change based on configurable parameter 'N'
+
+ | Step | Goal | Expected results |
+ |-|-|-|
+ | Change mux state from Active->Standby->Active 'N' times | CRM | Verify CRM values for routes/nexthop and check for leaks |
+ ||||
+ | Flush and re-learn Neighbor entry 'N' times in Standby state | CRM | Verify CRM values for routes/neighbor/nexthop and check for leaks |
+ ||||
+ | Flush and re-learn Neighbor entry 'N' times in Active state | CRM | Verify CRM values for routes/neighbor/nexthop and check for leaks |
+
diff --git a/docs/testplan/dual_tor/dual_tor_test_hld.md b/docs/testplan/dual_tor/dual_tor_test_hld.md
new file mode 100755
index 00000000000..09b71b1f79e
--- /dev/null
+++ b/docs/testplan/dual_tor/dual_tor_test_hld.md
@@ -0,0 +1,133 @@
+# Dual ToR Test Plan
+
+## Background
+
+This project introduces the concept of dual top of rack (ToR) switches to provide increased network redundancy for servers. Instead of the standard single ToR switch connecting a rack of servers to a datacenter network, dual ToR setups have two redundant ToR switches. In a rack utilizing a dual ToR setup, every server is connected to both ToRs via a special cable. This cable (known as a mux cable or smart cable) has three ends; two ends are connected to the ToR switches, and the third end connects to the server. The cable also contains a built in mux chip, which is able to select from which ToR the server receives incoming traffic. In the event of a link failure on a single ToR (or failure of an entire ToR), dual ToR setups will enable normal operation to continue via the remaining ToR.
+
+### Scope
+
+The scope of this test plan is to verify correct end-to-end operation of a dual ToR configuration. This includes control plane testing to verify correct state on the devices under test (DUTs) and data plane testing to verify correct packet flow between T1 switches, the ToRs, and the servers.
+
+The following are out of scope for this test plan:
+
+* Mux cable operation
+* Individual component behavior (i.e. verifying that CLI commands write to config DB properly)
+
+### Testbed Setup
+
+The dual ToR testbed setup is similar to the regular SONiC testbed setup in many ways. Arista VMs will still be used to emulate T1 switches, and PTF docker containers will be used to emulate servers.
+
+Both ToRs will be connected to all 4 T1 VMs similar to existing testbeds. However, the downstream ports of the ToR will be connected to smart cables, which are in turn connected to the PTF container. The corresponding ports on each ToR will be connected to the same cable, e.g. port A on both switches is connected to cable A, which is connected to some server A.
+
+The ToR switches will also be connected by an IP-in-IP tunnel, which facilitates forwarding of traffic from the standby ToR to the active ToR
+
+Since the physical smart cables that will eventually be used when dual ToR setups enter production are not yet available, Open vSwitch (OVS) will be used to emulate the mux cable behavior. One OVS bridge will be used to emulate a single mux cable, which will allow for full emulation of cable behavior.
+
+The full testbed setup is shown below:
+
+
+
+### OVS Bridge Verification
+
+Since testing of the mux cable is out of scope of this test plan, the test cases described here are written under the assumption that the mux cable is operating correctly. In order to validate this assumption in the dual ToR testbed, a pre-test check is needed to verify the correct operation of the bridge. This check can be included as a setup method for the test classes, or as part of the pre-test sanity check. The check involves the following steps:
+
+1. Attempt to ping (ICMP echo request) the server from both ToRs
+2. Verify that the server received only the request from the active ToR
+3. Send an ICMP echo reply from the server
+4. Verify that both ToRs received the reply
+
+## Test Cases
+
+These tests verify that packet flow behaves as expected following configuration changes or changes to the cable state (e.g. link failure)
+
+The three main traffic scenarios (and the expected packet flows) are as follows:
+
+1. Server -> ToR -> T1
+
+ In this scenario, a server sends traffic to a T1 switch. The traffic should pass through the mux cable, which should forward the packets to both ToRs. The standby ToR is expected to drop the packets, and the active ToR is expected to forward these packets to the T1 switch:
+
+ 
+
+ For all test cases in this packet flow scenario, the main step of the test will involve sending traffic from a server to a T1 switch. Let ToR A be the active ToR at the start of each case, and ToR B be the standby ToR. The test cases, setup steps, and expected results are described below:
+
+ | Case | Goal | Test Setup | Expected Control Plane | Expected Data Plane |
+ |-|-|-|-|-|
+ | Healthy | Verify normal operation | None | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives packet from the active ToR (A), and not the standby ToR (B) |
+ | Active Config Reload | Verify normal operation after performing config reload on the active ToR| Change some dual ToR feature configurations, then `config reload` active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B), and not the standby ToR (A) |
+ | Standby Config Reload | Verify normal operation after performing config reload on the standby ToR | Change some dual ToR feature configurations, then `config reload` standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives packet from the active ToR (A), and not the standby ToR (B) |
+ | Active ToR Switch | Verify packet flow after manually changing the active ToR | Set the standby ToR to active via CLI commands | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Active Link Down | Verify packet flow after the link from the server to the active ToR (A) goes down | Take down the link between the active ToR and the server | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby Link Down | Verify packet flow after the link from the server to the standby ToR (B) goes down | Take down the link between the standby ToR and the server | ToR A DBs indicate active, ToR B DBs indicate standby (no changes) | T1 switch receives packet from the active ToR (A) and not the standby ToR (B) |
+ | Active Link Drop | Verify packet flow if the link from the server to the active ToR (A) starts dropping packets, but stays up | Configure the OVS bridge to drop packets destined for the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby Link Drop | Verify packet flow if the link from the server to the standby ToR (B) starts dropping packets, but stays up | Configure the OVS bridge to drop packets destined for the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives packet from the active ToR (A) and not the standby ToR (B) |
+ | Active ToR BGP Down | Verify packet flow after the active ToR (A) loses BGP sessions | Shutdown all BGP sessions on the active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby ToR BGP Down | Verify packet flow after the standby ToR (B) loses BGP sessions | Shutdown all BGP sessions on the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives packet from the active ToR (A), and not the standby ToR (B) |
+ | Active ToR Heartbeat Loss | Verify packet flow after the active ToR (A) stops sending a heartbeat | Stop the LinkProber submodule on the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby ToR Heartbeat Loss | Verify packet flow after the standby ToR (B) stops sending a heartbeat | Stop the LinkProber submodule on the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives packet from the active ToR (A) and not the standby ToR (B) |
+ | Active ToR Failure | Verify packet flow after a simulated failure of the active ToR (A) | Configure active ToR to blackhole all traffic | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby ToR Failure | Verify packet flow after a simulated failure of the standby ToR (B) | Configure standby ToR to blackhole all traffic | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy Link | T1 switch receives packet from the active ToR (A) and not the standby ToR (B) |
+ | Active ToR Reboot | Verify packet flow after the active ToR (A) reboots | Reboot the active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives packet from the new active ToR (B) and not the new standby ToR (A); verify traffic interruption < 1 second |
+ | Standby ToR Reboot | Verify packet flow after the standby ToR (B) reboots | Reboot the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives packet from the active ToR (A) and not the standby ToR (B) |
+
+2. T1 -> Active ToR -> Server
+
+ In this scenario, a T1 switch sends traffic to the server via the active ToR. During normal operation, the T1 can send server-bound traffic to either ToR due to ECMP routing. However for the purposes of the test, the T1 switch will explicitly send traffic to only the active ToR (by sending packets from the port connected to the active ToR), which will then forward it to the server:
+
+ 
+
+ Let ToR A be the active ToR at the start of each case, and ToR B be the standby ToR. For all test cases in this packet flow scenario, the main step of the test will involve sending traffic from a T1 switch to the server via ToR A. In addition, every test case must also check that no IP-in-IP packet was received on the T1 switch to verify that the active ToR is not duplicating packets and sending them to the standby ToR. The test cases, setup steps, and expected results are described below:
+
+ | Case | Goal | Test Setup | Expected Control Plane | Expected Data Plane |
+ |-|-|-|-|-|
+ | Healthy | Verify normal operation | None | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active Config Reload | Out of scope: since config reload disrupts BGP sessions, T1 switch will never send traffic to this ToR | None | None | None |
+ | Standby Config Reload | Verify normal operation after performing config reload on the standby ToR | Change some dual ToR feature configurations, then `config reload` standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives no IP-in-IP packet; server receives packet|
+ | Active ToR Switch | Verify packet flow after manually changing the active ToR | Set the standby ToR to active via CLI commands | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives IP-in-IP packet with correct attributes (from ToR A to ToR B); server receives packet; verify traffic interruption < 1 second |
+ | Active Link Down | Verify packet flow after the link from the server to the active ToR (A) goes down | Take down the link between the active ToR (A) and the server | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives IP-in-IP packet with correct attributes (from ToR A to ToR B); server receives packet; verify traffic interruption < 1 second |
+ | Standby Link Down | Verify packet flow after the link from the server to the standby ToR (B) goes down | Take down the link between the standby ToR and the server | ToR A DBs indicate active, ToR B DBs indicate standby (no changes) | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active Link Drop | Verify packet flow if the link from the server to the active ToR (A) starts dropping packets, but stays up | Configure the OVS bridge to drop packets originating from the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives IP-in-IP packet with correct attributes (from ToR A to ToR B); server receives packet; verify traffic interruption < 1 second |
+ | Standby Link Drop | Verify packet flow if the link from the server to the standby ToR (B) starts dropping packets, but stays up | Configure the OVS bridge to drop packets originating from the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active ToR BGP Down | Out of scope: taking down the active ToR's (A) BGP sessions means the T1 will never send traffic to that ToR | None | None | None |
+ | Standby ToR BGP Down | Verify packet flow after the standby ToR (B) loses BGP sessions | Shutdown all BGP sessions on the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active ToR Heartbeat Loss | Verify packet flow after the active ToR (A) stops sending a heartbeat | Stop the LinkProber submodule on the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives IP-in-IP packet with correct attributes (from ToR A to ToR B); server receives packet; verify traffic interruption < 1 second |
+ | Standby ToR Heartbeat Loss | Verify packet flow after the standby ToR (B) stops sending a heartbeat | Stop the LinkProber submodule on the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active ToR Failure | Out of scope: taking down the active ToR's (A) BGP sessions means the T1 will never send traffic to that ToR | None | None | None |
+ | Standby ToR Failure | Verify packet flow after a simulated failure of the standby ToR (B) | Configure standby ToR to blackhole all traffic | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives no IP-in-IP packet; server receives packet |
+ | Active ToR Reboot | Out of scope: rebooting the active ToR means the T1 never sends traffic to the ToR | None | None | None |
+ | Standby ToR Reboot | Verify packet flow after the standby ToR (B) reboots | Reboot the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption < 1 second |
+
+3. T1 -> Standby ToR -> Server
+
+ In this scenario, a T1 switch sends traffic to a server via the standby ToR. The standby ToR should forward the traffic to the active ToR through the IP-in-IP tunnel. The active ToR then forwards the traffic to the server. Similar to the `T1 -> Active ToR -> Server` scenario, the T1 will explicitly send traffic only to the standby ToR:
+
+ 
+
+ For all test cases in this packet flow scenario, the main step of the test will involve sending traffic from a T1 switch to the server via the standby ToR. In addition, all test cases need to check that T1 switch receives an IP-in-IP packet with the correct attributes which indicates the standby ToR is forwarding southbound packets to the active ToR. The test cases, setup steps, and expected results are described below:
+
+ | Case | Goal | Test Setup | Expected Control Plane | Expected Data Plane |
+ |-|-|-|-|-|
+ | Healthy | Verify normal operation | None | ToR A DBs indicate active, ToR B DBs indicate standby | T1 Switch receives IP-in-IP packet with correct attributes (from ToR B to ToR A); server receives packet |
+ | Active Config Reload | Verify normal operation after performing config reload on the active ToR | Change some dual ToR feature configurations, then `config reload` active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet|
+ | Standby Config Reload | Out of scope: since config reload disrupts BGP sessions, T1 switch will never send traffic to this ToR | None | None | None |
+ | Active ToR Switch | Verify packet flow after manually changing the active ToR | Set the standby ToR to active via CLI commands | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Active Link Down | Verify packet flow after the link from the server to the active ToR (A) goes down | Take down the link between the active ToR and the server | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Standby Link Down | Verify packet flow after the link from the server to the standby ToR (B) goes down | Take down the link between the standby ToR and the server | ToR A DBs indicate active, ToR B DBs indicate standby (no changes) | T1 switch receives IP-in-IP packet with correct attributes (from ToR B to ToR A); server receives packet |
+ | Active Link Drop | Verify packet flow if the link from the server to the active ToR (A) starts dropping packets, but stays up | Configure the OVS bridge to drop packets originating from the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Standby Link Drop | Verify packet flow if the link from the server to the standby ToR (B) starts dropping packets, but stays up | Configure the OVS bridge to drop packets originating from the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives IP-in-IP packet with correct attributes (from ToR B to ToR A); server receives packet |
+ | Active ToR BGP Down | Verify packet flow after the active ToR (A) loses BGP sessions | Shutdown all BGP sessions on the active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Standby ToR BGP Down | Out of scope: taking down the standby ToR's (B) BGP sessions means the T1 will never send traffic to that ToR | None | None | None |
+ | Active ToR Heartbeat Loss | Verify packet flow after the active ToR (A) stops sending a heartbeat | Stop the LinkProber submodule on the active ToR | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Standby ToR Heartbeat Loss | Verify packet flow after the standby ToR (B) stops sending a heartbeat | Stop the LinkProber submodule on the standby ToR | ToR A DBs indicate active, ToR B DBs indicate standby and unhealthy link | T1 switch receives IP-in-IP packet with correct attributes (from ToR B to ToR A); server receives packet |
+ | Active ToR Failure | Verify packet flow after a simulated failure of the active ToR (A) | Configure active ToR to blackhole all traffic | ToR A DBs indicate standby and unhealthy link, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption is < 1 second |
+ | Standby ToR Failure | Out of scope: taking down the standby ToR's (B) BGP sessions means the T1 will never send traffic to that ToR | None | None | None |
+ | Active ToR Reboot | Verify packet flow after the active ToR (A) reboots | Reboot the active ToR | ToR A DBs indicate standby, ToR B DBs indicate active | T1 switch receives no IP-in-IP packet; server receives packet; verify traffic interruption < 1 second |
+ | Standby ToR Reboot | Out of scope: rebooting the standby ToR means the T1 never sends traffic to the ToR | None | None | None |
+
+In addition to the three scenarios above, additional tests are necessary to verify correct operation in specific edge cases:
+
+| Case | Goal | Test Steps | Expected Control Plane | Expected Data Plane |
+|-|-|-|-|-|
+| Gratuitous ARP | Verify the standby ToR can learn ARP entries when the server sends an ARP reply to the active ToR | Send an ARP request from the active ToR to the server | Both ToRs learn the ARP reply sent by server | None |
+| Proxy ARP | Verify each ToR can send proxy ARP replies for the other | For servers A and B, with server A having active ToR A and server B having active ToR B, send an ARP request from server A for server B's IP. Then send a packet from server A to server B. | Server A learns ToR A's MAC for server B's IP | T1 switch receives IP-in-IP packet with correct attributes (from ToR A to ToR B); server B receives packet |
+| Server Down | Verify the active and standby ToRs do not flap if the server becomes unresponsive | Stop sending all traffic from the server | ToR states do not flap between active and standby (both should indicate unhealthy mux cables) | None |
+| Tunnel Operation | Verify correct behavior of the IP-in-IP tunnel between the ToRs | Encapsulate a packet on ToR A and send through the tunnel to ToR B; do the same for ToR B to ToR A | None | For both cases, verify that T1 switch sees the correct IP-in-IP packet, and that the destination ToR receives the packet |
diff --git a/docs/testplan/dual_tor/dual_tor_test_plan_action_items.md b/docs/testplan/dual_tor/dual_tor_test_plan_action_items.md
new file mode 100755
index 00000000000..027989cf747
--- /dev/null
+++ b/docs/testplan/dual_tor/dual_tor_test_plan_action_items.md
@@ -0,0 +1,605 @@
+# Dual ToR Test Plan Action Items
+
+This document is subject to change as the project evolves.
+
+Not covered in this doc:
+
+1. Method of interacting with/controlling OVS bridge
+
+## Test Structure
+
+The test structure includes the following files, each corresponding to a section/action item below:
+
+### Non-Test Files
+
+* `cli_config_utils.py`
+* `control_plane_utils.py`
+* `data_plane_utils.py`
+* `link_config_utils.py`
+* `dual_tor_test_utils.py`
+* `tor_config_utils.py`
+
+### Test Files
+
+All of the following test files are dependent on `control_plane_utils.py` and `data_plane_utils.py`. Dependencies on other specific files are listed:
+
+* `test_normal_op.py -> cli_config_utils.py`
+* `test_link_failures.py -> link_config_utils.py`
+* `test_other_cases.py`
+* `test_tor_component_failures.py -> tor_config_utils.py`
+* `test_tor_failures.py -> tor_config_utils.py`
+
+## Non-Test Items
+
+To make the tests cases themselves as simple to write as possible, much of the work in the tests will be offloaded to these helper methods/fixtures that should be used within the body of each test case.
+
+### Dual ToR Test Utilities
+
+General test support methods/fixtures
+
+Need to also add fixtures/methods to get the IPs associated with each port/interface
+
+```python
+def check_ovs_bridges():
+ """
+ Verifies that the OVS bridge is operating correctly as part of the sanity check
+
+ Should call the PTF helper method `ping_server_from_tors`
+
+ Returns:
+ True if the PTF helper returns true, False otherwise
+ """
+
+def ping_server_from_tors():
+ """
+ Helper method for `check_ovs_bridge` to be run on the PTF
+
+ This function should perform the following steps for all servers under the ToR set:
+ 1. Ping the server from each ToR
+ 2. Verify the server receives only the ping from the active ToR
+ 3. Send a reply from the server
+ 4. Verify both ToRs receive the reply
+
+ Returns:
+ True if the check passes, False otherwise
+ """
+
+@pytest.fixture
+def tor_mux_intf():
+ """
+ Returns the interface/port name on the ToR that the mux cable used for testing is connected to (this should be consistent/deterministic between runs)
+
+ Returns:
+ The interface name as a string
+ """
+
+@pytest.fixture
+def ptf_server_intf():
+ """
+ Returns the port corresponding to the server on the PTF used during the test run (this should be consistent/deterministic between test runs, probably by just using the first server every time)
+
+ Returns:
+ The interface name of the server
+ """
+@pytest.fixture
+def t1_upper_tor_intfs():
+ """
+ Returns the PTF interface(s) that the upper ToR is connected to on the T1 (this should be consistent/deterministic between test runs, probably by just using the first T1 every time)
+
+ The upper ToR may also be known as ToR A
+
+ Returns:
+ List containing the interface names on the T1 corresponding to the upper ToR
+ """
+
+@pytest.fixture
+def t1_lower_tor_intfs():
+ """
+ Returns the PTF interface(s) that the lower ToR is connected to on the T1 (this should be consistent/deterministic between test runs, probably by just using the first T1 every time)
+
+ The lower ToR may also be known as ToR B
+
+ Returns:
+ List containing the interface names on the T1 corresponding to the lower ToR
+ """
+
+def apply_dual_tor_config(duthost, active=True):
+ """
+ Applies dual ToR configurations to a regular ToR device
+
+ Allows mocking/testing parts of a dual ToR system without requiring a dual ToR testbed
+ See dual ToR orchagent test plan for details.
+ Args:
+ duthost: The host on which to apply the config
+ active: if True, simulate apply an active ToR. If False, apply standby ToR configs
+ """
+```
+
+### CLI Configuration Utilities
+
+```python
+def change_configs_and_config_reload(tor_host):
+ """
+ Make some dual ToR config changes (exact changes pending CLI being finalized) and config reload after (on both ToRs)
+
+ Args:
+ tor_host: DUT host object from duthosts fixture
+ """
+
+def force_active_tor(tor_host, intf):
+ """
+ Manually set `tor_host` to the active ToR for `intf`
+
+ Args:
+ tor_host: DUT host object which will become the active ToR (passed by calling function from duthosts fixture)
+ intf: The interface name to set `tor_host` as active on, or 'all'
+ """
+```
+
+### Link Configuration Utilities
+
+Need to create a fixture to interact with the OVS bridge and add as a parameter to the following methods
+
+```python
+def shutdown_active_tor_mux_link():
+ """
+ Shutdown the link between the OVS bridge and the active ToR
+ """
+
+def shutdown_standby_tor_mux_link():
+ """
+ Shutdown the link between the OVS bridge and the standby ToR
+ """
+
+def drop_active_tor_mux_link(): # No longer needed, this behavior is included in the mux simulator client
+ """
+ Configure the OVS bridge to drop packets between the bridge and the active ToR
+ """
+
+def drop_standby_tor_mux_link(): # No longer needed, this behavior is included in the mux simulator client
+ """
+ Configure the OVS bridge to drop packets between the bridge and the standby ToR
+ """
+```
+
+### ToR Configuration Utilities
+
+```python
+def shutdown_tor_bgp(tor_host):
+ """
+ Shutdown all BGP sessions on `tor_host`
+
+ Args:
+ tor_host: A ToR host object (should be passed by the calling function, from the duthosts fixture)
+ """
+
+def shutdown_tor_heartbeat(tor_host):
+ """
+ Shutdown the LinkProber on `tor_host`
+ """
+
+def simulate_tor_failure(tor_host):
+ """
+ Configure `tor_host` to blackhole all traffic
+ """
+
+def reboot_tor(tor_host):
+ """
+ Reboot `tor_host`
+ """
+```
+
+### Control Plane Utilities
+
+Methods/fixtures used to verify control plane (APP DB/STATE DB) values
+
+```python
+def expect_app_db_values(tor_host, intf_name, state):
+ """
+ Query APP_DB on `tor_host` and check if mux cable fields match the given parameters
+
+ The following tables/fields are checked:
+
+ MUX_CABLE|PORTNAME:
+ - state:
+
+ HW_MUX_CABLE|PORTNAME
+ - state:
+
+ MUX_CABLE_RESPONSE|PORTNAME:
+ - response
+
+ Args:
+ tor_host: DUT host object (needs to be passed by calling function from duthosts fixture)
+ intf_name: The PORTNAME to check in each table
+ state: The expected value for each field in each table listed above.
+
+ Returns:
+ True if actual values match expected, False if not (also should have some mechanism to show the values that don't match, maybe calling `pytest.fail()` with a message)
+ """
+
+def expect_state_db_values(tor_host, intf_name, state, health):
+ """
+ Query STATE_DB on `tor_host` and check if mux cable fields match the given parameters
+
+ The following tables/fields are checked:
+
+ MUX_CABLE_TABLE|PORTNAME:
+ - state:
+ - health:
+
+ HW_MUX_CABLE_TABLE|PORTNAME:
+ - state:
+
+ Args:
+ tor_host: DUT host object (needs to be passed by calling function from duthosts fixture)
+ intf_name: The PORTNAME to check in each table
+ state: The expected value for each of the `state` fields in both tables
+ health: The expected value for the `health` field in the MUX_CABLE_TABLE table
+
+ Returns:
+ True if actual values match expected, False if not (also should have some mechanism to show the values that don't match, maybe calling `pytest.fail()` with a message)
+ """
+```
+
+### Data Plane Utilities
+
+Methods/fixtures used to support data plane operations/verifications
+
+```python
+def send_t1_to_server_after_action(server_port, tor_port, expect_tunnel_packet=False, delay=1, timeout=5, action=None, *args):
+ """
+ Performs `action`, then continuously sends a packet from the T1 to the server every 100ms until timeout or packet is received
+
+ The `delay` is also the maximum allowable traffic interruption time. If after the `delay` the packet cannot be successfully sent, the ToR switchover process is taking too long.
+
+ Should call PTF helper method `send_t1_to_server`
+
+ Args:
+ server_port: Corresponds to the destination server
+ tor_port: Corresponds to the ToR used to send the packet
+ expect_tunnel_packet: Whether or not the T1 should expect to receive a packet through the IP-in-IP tunnel
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Time to wait for packet to be transmitted
+ action: Some function which performs the desired action, or `None` if no action/delay is desired
+ *args: Any arguments to be passed to `action`
+ """
+
+def send_t1_to_server(server_port, tor_port, delay=1, timeout=5, expect_tunnel_packet=False):
+ """
+ Helper method for `send_t1_to_server_after_action` to be run on PTF
+
+ Send a packet via `tor_port` to the server, and check for an IP-in-IP packet according to `expect_tunnel_packet`. Also check that the server receives the packet.
+
+ If `expect_tunnel_packet` is `True`, check that the T1 receives an IP-in-IP packet from `tor_port`, and no other ports. If `False`, check that no IP-in-IP packets are received from any port.
+
+ Args:
+ server_port: The port intended to receive the packet
+ tor_port: The port through which to send the packet. Connected to either the upper or lower ToR
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Time to wait for packet to be transmitted
+ expect_tunnel_packet: `True` or `False` whether to expect an IP-in-IP tunnel packet
+ """
+
+def send_server_to_t1_after_action(server_port, tor_port, delay=1, timeout=5, action=None, *args):
+ """
+ Performs `action`, then continuously sends a packet from the server to the T1 every 100ms until timeout or packet is received
+
+ The `delay` is also the maximum allowable traffic interruption time. If after the `delay` the packet cannot be successfully sent, the ToR switchover process is taking too long.
+
+ Should call PTF helper method "send_server_to_t1"
+
+ Args:
+ server_port: The port to send the packet through
+ tor_port: The port on the T1 the packet is expected to be received by
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Timeout to wait for packet to be received
+ action: Some function which performs the desired action, or `None` if no action/delay is desired
+ *args: Any arguments to be passed to `action`
+ """
+
+def send_server_to_t1(server_port, tor_port, delay=1, timeout=5):
+ """
+ Helper method for `send_server_to_t1_after_action` to be run on PTF
+
+ Send a packet from the server port to the T1, and verify that the packet is received from tor_port and no other ports on the T1
+
+ Args:
+ server_port: The port to send the packet through
+ tor_port: The port to expect the packet from
+ delay: Maximum acceptable delay for traffic to continue flowing again
+ timeout: Time to wait for packet to be transmitted
+ """
+```
+
+## Test Scenarios
+
+All test cases should use either `send_server_to_t1_after_action` or `send_t1_to_server_after_action` to send/verify packets on the data plane, since these methods automatically verify the traffic interruption interval.
+
+The test cases are named according to the following format:
+
+```test_<action>_<traffic direction>[_<tor>]```
+
+It's important to note that for consistency, any time the active or standby ToR (and their respective links/ports) are mentioned, they refer to the state of the ToR prior to the test being run. Please check the test plan HLD for clarification.
+
+So a method testing what happens to downstream traffic (T1->server) passing through the standby ToR when the active ToR reboots would be named:
+
+```test_active_tor_reboot_downstream_standby```
+
+And a method testing what happens to upstream traffic (server->T1) when the active ToR loses its BGP sessions would be named:
+
+```test_active_tor_bgp_down_upstream```
+
+For testing purposes, the upper ToR will be set to the active ToR at the beginning of the test (via the mux simulator, not the SONiC CLI command).
+
+Note that the upper/lower ToR designations do not change throughout the test.
+
+### Normal Operation
+
+```python
+def test_normal_oper_upstream(ptf_server_port, t1_upper_tor_port, duthosts, tor_mux_intf):
+ """
+ Verify normal operation of dual ToR setup by sending traffic from the server to the T1
+
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_upper_tor_port)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'standby')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'standby', 'healthy')`
+ """
+
+def test_normal_oper_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Verify normal operation of dual ToR setup by sending traffic from the T1 to the server via the active ToR
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'standby')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'standby', 'healthy')`
+ """
+
+def test_normal_oper_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Verify normal operation of dual ToR setup by sending traffic from the T1 to the server via the standby ToR
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, expect_tunnel_packet=True)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'standby')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'standby', 'healthy')`
+ """
+
+def test_active_config_reload_upstream(ptf_server_port, t1_lower_tor_port, duthosts):
+ """
+ Verify operation of dual ToR setup after active ToR config reload by sending traffic from the server to the T1
+
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_lower_tor_port, action=change_configs_and_config_reload, upper_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'standby')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'standby', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_standby_config_reload_upstream(ptf_server_port, t1_upper_tor_port, duthosts):
+ """
+ Verify operation of dual ToR setup after standby ToR config reload by sending traffic from the server to the T1
+
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_upper_tor_port, action=change_configs_and_config_reload, lower_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'standby')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'standby', 'healthy')`
+ """
+
+def test_standby_config_reload_downstream_active(ptf_server_port, t1_upper_tor_port, duthosts):
+ """
+ Verify operation of dual ToR setup after standby ToR config reload by sending traffic from the T1 to the server via the active ToR
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, action=change_configs_and_config_reload, lower_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'standby')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'standby', 'healthy')`
+ """
+
+def test_active_config_reload_downstream_standby(ptf_server_port, t1_lower_tor_port, duthosts):
+ """
+ Verify operation of dual ToR setup after active ToR config reload by sending traffic from the T1 to the server via the standby ToR
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, expect_tunnel_packet=True, action=change_configs_and_config_reload, upper_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'standby')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'standby', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_tor_switch_upstream(ptf_server_port, t1_lower_tor_port, duthosts):
+ """
+ Verify operation of dual ToR setup after switching the active ToR by sending traffic from the server to the T1
+
+ Select the current standby ToR (should be the lower ToR) from duthosts
+
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_lower_tor_port, action=force_active_tor, current_standby_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'standby')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'standby', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_tor_switch_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Verify operation of dual ToR setup after switching the active ToR by sending traffic from the T1 to the server via the original active ToR
+
+ Select the current standby ToR (should be the lower ToR) from duthosts
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, expect_tunnel_packet=True, action=force_active_tor, current_standby_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'standby')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'standby', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_tor_switch_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Verify operation of dual ToR setup after switching the active ToR by sending traffic from the T1 to the server via the original standby ToR
+
+ Select the current standby ToR (should be the lower ToR) from duthosts
+
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, action=force_active_tor, current_standby_tor)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'standby')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'standby', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+```
+
+### Link Failures
+
+```python
+def test_active_link_down_upstream(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_lower_tor_port, action=shutdown_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_active_link_down_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, expect_tunnel_packet=True, action=shutdown_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_active_link_down_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, action=shutdown_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_standby_link_down_upstream(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_upper_tor_port, action=shutdown_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+def test_standby_link_down_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, action=shutdown_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+def test_standby_link_down_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, action=shutdown_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+def test_active_link_drop_upstream(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_lower_tor_port, action=drop_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_active_link_drop_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, expect_tunnel_packet=True, action=drop_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_active_link_drop_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, action=drop_active_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'unknown')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'active')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'unknown', 'unhealthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'active', 'healthy')`
+ """
+
+def test_standby_link_drop_upstream(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_server_to_t1_after_action(ptf_server_port, t1_upper_tor_port, action=drop_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+def test_standby_link_drop_downstream_active(ptf_server_port, t1_upper_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_upper_tor_port, action=drop_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+def test_standby_link_drop_downstream_standby(ptf_server_port, t1_lower_tor_port):
+ """
+ Calls `send_t1_to_server_after_action(ptf_server_port, t1_lower_tor_port, action=drop_standby_tor_mux_link)`
+ Expects `expect_app_db_values(upper_tor, tor_mux_intf, 'active')` and `expect_app_db_values(lower_tor, tor_mux_intf, 'unknown')`
+ Expects `expect_state_db_values(upper_tor, tor_mux_intf, 'active', 'healthy')` and `expect_state_db_values(lower_tor, tor_mux_intf, 'unknown', 'unhealthy')`
+ """
+
+
+```
+
+### ToR Component Failures
+
+Failure of individual components on the ToR (see HLD for test steps and expected results)
+
+```python
+def test_active_tor_bgp_down_upstream():
+def test_active_tor_bgp_down_downstream_active(): # Out of scope, method included for completeness
+def test_active_tor_bgp_down_downstream_standby():
+
+def test_standby_tor_bgp_down_upstream():
+def test_standby_tor_bgp_down_downstream_active():
+def test_standby_tor_bgp_down_downstream_standby(): # Out of scope, method included for completeness
+
+def test_active_tor_heartbeat_loss_upstream():
+def test_active_tor_heartbeat_loss_downstream_active():
+def test_active_tor_heartbeat_loss_downstream_standby():
+
+def test_standby_tor_heartbeat_loss_upstream():
+def test_standby_tor_heartbeat_loss_downstream_active():
+def test_standby_tor_heartbeat_loss_downstream_standby():
+```
+
+### ToR Failures
+
+Failure of the entire ToR (see HLD for test steps and expected results)
+
+```python
+def test_active_tor_failure_upstream():
+def test_active_tor_failure_downstream_active(): # Out of scope, method included for completeness
+def test_active_tor_failure_downstream_standby():
+
+def test_standby_tor_failure_upstream():
+def test_standby_tor_failure_downstream_active():
+def test_standby_tor_failure_downstream_standby(): # Out of scope, method included for completeness
+
+def test_active_tor_reboot_upstream():
+def test_active_tor_reboot_downstream_active(): # Out of scope, method included for completeness
+def test_active_tor_reboot_downstream_standby():
+
+def test_standby_tor_reboot_upstream():
+def test_standby_tor_reboot_downstream_active():
+def test_standby_tor_reboot_downstream_standby(): # Out of scope, method included for completeness
+```
+
+### Other Test Cases
+
+```python
+def test_grat_arp():
+ """
+ Sends an ARP request from the active ToR to the server. Checks that both ToRs learned the reply
+
+ No data plane operations/checks
+ """
+
+def test_proxy_arp():
+ """
+ For servers A and B, with server A having active ToR A and server B having active ToR B, send an ARP request from server A for server B's IP. Then send a packet from server A to server B.
+
+ Control plane: Checks that server A receives an ARP reply with ToR A's MAC for server B's IP
+ Data plane: Verify T1 receives IP-in-IP packet from ToR A to ToR B, and that server B receives the packet.
+ """
+def test_server_down():
+ """
+ Server stops sending all traffic.
+
+ Verify that the ToR states do not excessively flap between active/standby (check APP DB/STATE DB values at several intervals)
+
+ No data plane operations/checks
+ """
+
+def test_tunnel():
+ """
+ This test will no longer be planned. Explicitly checking encap/decap of tunnel packets is reserved for the orchagent tests. Test cases listed in this document will still verify correct tunnel packets.
+
+ ~~Send encapped server traffic from ToR A to ToR B, and ToR B to ToR A~~
+
+ ~~Verify that the T1 switch sees the expected IP-in-IP packet, and that each ToR receives the packet~~
+ """
+```
diff --git a/docs/testplan/dual_tor/img/downstream_packet_flow_active.jpg b/docs/testplan/dual_tor/img/downstream_packet_flow_active.jpg
new file mode 100755
index 00000000000..fa67fd442f8
Binary files /dev/null and b/docs/testplan/dual_tor/img/downstream_packet_flow_active.jpg differ
diff --git a/docs/testplan/dual_tor/img/downstream_packet_flow_standby.jpg b/docs/testplan/dual_tor/img/downstream_packet_flow_standby.jpg
new file mode 100755
index 00000000000..ce71b7edace
Binary files /dev/null and b/docs/testplan/dual_tor/img/downstream_packet_flow_standby.jpg differ
diff --git a/docs/testplan/dual_tor/img/packet_flow_upstream.jpg b/docs/testplan/dual_tor/img/packet_flow_upstream.jpg
new file mode 100755
index 00000000000..8b9bba6a934
Binary files /dev/null and b/docs/testplan/dual_tor/img/packet_flow_upstream.jpg differ
diff --git a/docs/testplan/dual_tor/img/testbed_overview.jpg b/docs/testplan/dual_tor/img/testbed_overview.jpg
new file mode 100755
index 00000000000..89a868bbd7f
Binary files /dev/null and b/docs/testplan/dual_tor/img/testbed_overview.jpg differ
diff --git a/docs/testplan/pfcwd/PFCWD_2SENDER_2RECEIVER.md b/docs/testplan/pfcwd/PFCWD_2SENDER_2RECEIVER.md
new file mode 100644
index 00000000000..7bf8791eb19
--- /dev/null
+++ b/docs/testplan/pfcwd/PFCWD_2SENDER_2RECEIVER.md
@@ -0,0 +1,86 @@
+This document describes how to test PFC watchdog in a 3-node topology with 2 senders and 2 receivers.
+
+## Background
+PFC watchdog is designed to detect and mitigate PFC storm received for each port. PFC pause frames are used in lossless Ethernet to pause the link partner from sending packets. Such a back-pressure mechanism could propagate to the whole network and cause the network to stop forwarding traffic. PFC watchdog is to detect *abnormal* back-pressure caused by receiving excessive PFC pause frames, and mitigate such a situation by disabling PFC-caused pause temporarily.
+
+On SONiC, PFC watchdog is enabled at lossless priorities (e.g., 3 and 4) by default. PFC watchdog has three function blocks, i.e. detection, mitigation and restoration. You can find more details [here](https://github.com/Azure/SONiC/wiki/PFC-Watchdog).
+
+### PFC storm detection
+The PFC storm detection is for a switch to detect that a lossless queue is receiving PFC storm from its link partner and the queue is in a paused state over *T0* amount of time. Even when the queue is empty, as soon as the duration for a queue in paused state exceeds T0 amount of time, the watchdog should detect such storm. T0 is called *PFC storm detection time*.
+
+### PFC storm mitigation
+Once PFC storm is detected on a queue, the watchdog can then have two actions, drop and forward at per queue level. When drop action is selected, following actions need to be implemented.
+
+* All existing packets in the output queue are discarded
+* All subsequent packets destined to the output queue are discarded
+* All subsequent packets received by the corresponding priority group of this queue are discarded including the pause frames received. As a result, the switch should not generate any pause frame to its neighbor due to congestion of this output queue.
+
+When forward action is selected, following actions need to be implemented.
+
+* The queue no longer honors the PFC frames received. All packets destined to the queue are forwarded as well as those packets that were in the queue.
+
+The default action is drop.
+
+### PFC storm restoration
+The watchdog should continue counting the PFC frames received on the queue. If there is no PFC frame received over a *T1* period, then re-enable the PFC on the queue and stop dropping packets if the previous mitigation was drop. T1 is called *PFC storm restoration time*.
+
+### PFC watchdog implementation
+PFC watchdog polls the states of each lossless queue every *T2* period. T2 is called polling interval. To reduce CPU overhead, T2 is typically of hundreds of milliseconds.
+
+## PFC watchdog commands on SONiC
+To get PFC watchdog configuration:
+
+$ pfcwd show config
+
+To get PFC watchdog statistics:
+
+$ pfcwd show stats
+
+To start PFC watchdog using default parameters and action
+
+$ sudo pfcwd start_default
+
+To start PFC watchdog using specific time values and drop action on all the ports
+
+$ sudo pfcwd start --action drop ports all detection-time [detection time in ms] --restoration-time [restoration time in ms]
+
+To stop PFC watchdog
+
+$ sudo pfcwd stop
+
+Note that there is no way to clear PFC watchdog statistics unless we reload config database or minigraph.
+
+The testbed consists of three IXIA ports and a SONiC device under test (DUT) as follows. All the IXIA ports should have the same bandwidth capacity. To reduce the configuration complexity, we recommend configuring the switch as a Top of Rack (ToR) / Tier 0 (T0) switch and binding three switch interfaces to the Vlan. PFC watchdog must be enabled at the DUT.
+
+```
+ __________
+ | |
+IXIA port 1 ------ et1 | DUT |
+ | | et3 ------ IXIA port 3
+IXIA port 2 ------ et2 | |
+ |__________|
+
+```
+
+## Experiment Steps
+In this experiment, we need to create three types of traffic items:
+
+- Test data traffic: IXIA port 1 sends bi-directional traffic to port 2 and 3 at a lossless priority (e.g., 3).
+
+- Background data traffic: IXIA port 1 sends bi-directional traffic to port 2 and 3 at all the lossy priorities.
+
+- PFC pause storm: Persistent PFC pause frames from the IXIA port 2 to et2 of DUT having same priority (e.g., 3) as test data traffic. To fully block the switch queue, the inter-frame transmission interval should be smaller than per-frame pause duration.
+
+The duration of test data traffic and background data traffic is $T_{data}$. The duration of PFC pause storm is $T_{storm}$. Let’s use $T_{detect}$ and $T_{poll}$ to denote the detection time, and polling interval of PFC watchdog.
+
+We start all the traffic items at time 0, wait for all the traffic items to finish, and then check the following items:
+
+- When $T_{storm}$ is larger than $T_{detect}$ + $T_{poll}$:
+ - PFC watchdog is triggered on the corresponding lossless priority of et2.
+ - Test data traffic between port 1 and 2 experience packet losses.
+ - All the other data traffic items are not impacted.
+
+- When $T_{storm}$ is smaller than $T_{detect}$:
+ - PFC watchdog is NOT triggered at interface et2.
+ - Test data traffic from port 1 to port 2 is delayed. Its throughput is lower than the demand. But it should have no packet drops.
+ - All the other data traffic items are not impacted.
diff --git a/setup-container.sh b/setup-container.sh
index f02c1dae1fc..a35248ab46c 100755
--- a/setup-container.sh
+++ b/setup-container.sh
@@ -21,7 +21,7 @@ function setup_local_image() {
chmod 600 $tmpdir/id_rsa
cat < $tmpdir/Dockerfile.j2
-FROM {{ DOCKER_REGISTRY }}/{{ DOCKER_SONIC_MGMT }}
+FROM {{ IMAGE_ID }}
RUN sudo groupadd -g {{ GROUPID }} {{ GROUPNAME }}
RUN sudo useradd --shell /bin/bash -u {{ USERID }} -g {{ GROUPID }} -d /home/{{ USERNAME }} {{ USERNAME }}
@@ -35,13 +35,13 @@ USER {{ USERNAME }}
ADD --chown={{ USERNAME }} id_rsa /home/{{ USERNAME }}/.ssh/id_rsa
ENV HOME=/home/{{ USERNAME }}
+ENV USER {{ USERNAME }}
WORKDIR $HOME
EOF
cat < $tmpdir/data.env
-DOCKER_SONIC_MGMT=$DOCKER_SONIC_MGMT
-DOCKER_REGISTRY=$DOCKER_REGISTRY
+IMAGE_ID=$IMAGE_ID
GROUPID=$HOST_GROUP_ID
USERID=$HOST_USER_ID
GROUPNAME=$USER
diff --git a/spytest/.gitignore b/spytest/.gitignore
index 00cf3f8b3d2..2160fb324b0 100644
--- a/spytest/.gitignore
+++ b/spytest/.gitignore
@@ -80,7 +80,7 @@ fabric.properties
.idea/caches/build_file_checksums.ser
# JetBrains templates
-**___jb_tmp___
+**/*___jb_tmp___
### PyCharm Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
diff --git a/tests/acl/templates/acltb_table.j2 b/tests/acl/templates/acltb_table.j2
deleted file mode 100644
index 1215e10bf37..00000000000
--- a/tests/acl/templates/acltb_table.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{
- "ACL_TABLE": {
- "{{ acl_table_name }}": {
- "policy_desc": "{{ acl_table_name }}, {{ acl_table_stage }}",
- "ports": [
-{% for port in acl_table_ports %}
- "{{ port }}"{% if not loop.last %},
-{% endif %}{% endfor %}],
-{% if acl_table_stage == "egress" %}
- "stage": "egress",
-{% endif %}
- "type": "{{ acl_table_type }}"
- }
- }
-}
diff --git a/tests/acl/test_acl.py b/tests/acl/test_acl.py
index 7a753a478e3..2ddb1d41771 100644
--- a/tests/acl/test_acl.py
+++ b/tests/acl/test_acl.py
@@ -166,9 +166,6 @@ def ip_version(request, tbinfo, duthosts, rand_one_dut_hostname):
if tbinfo["topo"]["type"] == "t0" and request.param == "ipv6":
pytest.skip("IPV6 ACL test not currently supported on t0 testbeds")
- if "201911" in duthosts[rand_one_dut_hostname].os_version and request.param == "ipv6":
- pytest.skip("acl-loader does not handle IPV6 default drop rule correctly in 201911")
-
return request.param
@@ -248,54 +245,15 @@ def stage(request, duthosts, rand_one_dut_hostname):
@pytest.fixture(scope="module")
-def acl_table_config(duthosts, rand_one_dut_hostname, setup, stage, ip_version):
- """Generate ACL table configuration files and deploy them to the DUT.
+def acl_table(duthosts, rand_one_dut_hostname, setup, stage, ip_version, backup_and_restore_config_db_module):
+ """Apply ACL table configuration and remove after tests.
Args:
duthosts: All DUTs belong to the testbed.
rand_one_dut_hostname: hostname of a random chosen dut to run test.
setup: Parameters for the ACL tests.
stage: The ACL stage under test.
-
- Returns:
- A dictionary containing the table name and the corresponding configuration file.
-
- """
- duthost = duthosts[rand_one_dut_hostname]
-
- acl_table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
-
- acl_table_vars = {
- "acl_table_name": acl_table_name,
- "acl_table_ports": setup["acl_table_ports"],
- "acl_table_stage": stage,
- "acl_table_type": "L3" if ip_version == "ipv4" else "L3V6"
- }
-
- logger.info("ACL table configuration:\n{}".format(pprint.pformat(acl_table_vars)))
-
- acl_table_config_file = "acl_table_{}.json".format(acl_table_name)
- acl_table_config_path = os.path.join(DUT_TMP_DIR, acl_table_config_file)
-
- logger.info("Generating DUT config for ACL table \"{}\"".format(acl_table_name))
- duthost.host.options["variable_manager"].extra_vars.update(acl_table_vars)
- duthost.template(src=os.path.join(TEMPLATE_DIR, ACL_TABLE_TEMPLATE),
- dest=acl_table_config_path)
-
- return {
- "table_name": acl_table_name,
- "config_file": acl_table_config_path
- }
-
-
-@pytest.fixture(scope="module")
-def acl_table(duthosts, rand_one_dut_hostname, acl_table_config, backup_and_restore_config_db_module):
- """Apply ACL table configuration and remove after tests.
-
- Args:
- duthosts: All DUTs belong to the testbed.
- rand_one_dut_hostname: hostname of a random chosen dut to run test.
- acl_table_config: A dictionary describing the ACL table configuration to apply.
+ ip_version: The IP version under test.
backup_and_restore_config_db_module: A fixture that handles restoring Config DB
after the tests are over.
@@ -304,8 +262,16 @@ def acl_table(duthosts, rand_one_dut_hostname, acl_table_config, backup_and_rest
"""
duthost = duthosts[rand_one_dut_hostname]
- table_name = acl_table_config["table_name"]
- config_file = acl_table_config["config_file"]
+ table_name = "DATA_{}_{}_TEST".format(stage.upper(), ip_version.upper())
+
+ acl_table_config = {
+ "table_name": table_name,
+ "table_ports": ",".join(setup["acl_table_ports"]),
+ "table_stage": stage,
+ "table_type": "L3" if ip_version == "ipv4" else "L3V6"
+ }
+
+ logger.info("Generated ACL table configuration:\n{}".format(pprint.pformat(acl_table_config)))
loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="acl")
loganalyzer.load_common_config()
@@ -313,10 +279,15 @@ def acl_table(duthosts, rand_one_dut_hostname, acl_table_config, backup_and_rest
try:
loganalyzer.expect_regex = [LOG_EXPECT_ACL_TABLE_CREATE_RE]
with loganalyzer:
- logger.info("Creating ACL table from config file: \"{}\"".format(config_file))
-
- # TODO: Use `config` CLI to create ACL table
- duthost.command("sonic-cfggen -j {} --write-to-db".format(config_file))
+ logger.info("Creating ACL table: \"{}\"".format(table_name))
+ duthost.command(
+ "config acl add table {} {} -s {} -p {}".format(
+ table_name,
+ acl_table_config["table_type"],
+ acl_table_config["table_stage"],
+ acl_table_config["table_ports"]
+ )
+ )
except LogAnalyzerError as err:
# Cleanup Config DB if table creation failed
logger.error("ACL table creation failed, attempting to clean-up...")
@@ -409,7 +380,7 @@ def acl_rules(self, duthosts, rand_one_dut_hostname, localhost, setup, acl_table
self.post_setup_hook(duthost, localhost, populate_vlan_arp_entries, tbinfo)
except LogAnalyzerError as err:
# Cleanup Config DB if rule creation failed
- logger.error("ACL table creation failed, attempting to clean-up...")
+ logger.error("ACL rule application failed, attempting to clean-up...")
self.teardown_rules(duthost)
raise err
@@ -787,9 +758,11 @@ def setup_rules(self, dut, acl_table, ip_version):
"""
table_name = acl_table["table_name"]
- dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
+ dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating basic ACL rules config for ACL table \"{}\"".format(table_name))
+
+ dut_conf_file_path = os.path.join(DUT_TMP_DIR, "acl_rules_{}.json".format(table_name))
dut.template(src=os.path.join(TEMPLATE_DIR, ACL_RULES_FULL_TEMPLATE[ip_version]),
dest=dut_conf_file_path)
@@ -813,6 +786,7 @@ def setup_rules(self, dut, acl_table, ip_version):
"""
table_name = acl_table["table_name"]
+ dut.host.options["variable_manager"].extra_vars.update({"acl_table_name": table_name})
logger.info("Generating incremental ACL rules config for ACL table \"{}\""
.format(table_name))
diff --git a/tests/bgp/bgp_helpers.py b/tests/bgp/bgp_helpers.py
index 1e6e07e477d..78f39d045d7 100644
--- a/tests/bgp/bgp_helpers.py
+++ b/tests/bgp/bgp_helpers.py
@@ -1,6 +1,7 @@
import os
import re
import time
+import json
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR))
@@ -9,6 +10,14 @@
BGP_NO_EXPORT_TEMPLATE = 'bgp_no_export.j2'
BGP_CONFIG_BACKUP = 'backup_bgpd.conf.j2'
DEFAULT_BGP_CONFIG = 'bgp:/usr/share/sonic/templates/bgpd/bgpd.conf.j2'
+DUMP_FILE = "/tmp/bgp_monitor_dump.log"
+CUSTOM_DUMP_SCRIPT = "bgp/bgp_monitor_dump.py"
+CUSTOM_DUMP_SCRIPT_DEST = "/usr/share/exabgp/bgp_monitor_dump.py"
+BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
+BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
+BGP_MONITOR_NAME = "bgp_monitor"
+BGP_MONITOR_PORT = 7000
+BGP_ANNOUNCE_TIME = 30 #should be enough to receive and parse bgp updates
def apply_bgp_config(duthost, template_name):
@@ -74,3 +83,38 @@ def apply_default_bgp_config(duthost, copy=False):
# Skip 'start-limit-hit' threshold
duthost.shell('systemctl reset-failed bgp')
restart_bgp(duthost)
+
+def parse_exabgp_dump(host):
+ """
+ Parse the dump file of exabgp, and build a set for checking routes
+ """
+ routes = set()
+ output_lines = host.shell("cat {}".format(DUMP_FILE), verbose=False)['stdout_lines']
+ for line in output_lines:
+ routes.add(line)
+ return routes
+
+def parse_rib(host, ip_ver):
+ """
+ Parse output of 'show bgp ipv4/6' and parse into a dict for checking routes
+ """
+ routes = {}
+ cmd = "vtysh -c \"show bgp ipv%d json\"" % ip_ver
+ route_data = json.loads(host.shell(cmd, verbose=False)['stdout'])
+ for ip, nexthops in route_data['routes'].iteritems():
+ aspath = set()
+ for nexthop in nexthops:
+ aspath.add(nexthop['path'])
+ routes[ip] = aspath
+ return routes
+
+def verify_all_routes_announce_to_bgpmon(duthost, ptfhost):
+ time.sleep(BGP_ANNOUNCE_TIME)
+ bgpmon_routes = parse_exabgp_dump(ptfhost)
+ rib_v4 = parse_rib(duthost, 4)
+ rib_v6 = parse_rib(duthost, 6)
+ routes_dut = dict(rib_v4.items() + rib_v6.items())
+ for route in routes_dut.keys():
+ if route not in bgpmon_routes:
+ return False
+ return True
diff --git a/tests/bgp/conftest.py b/tests/bgp/conftest.py
index 2bd19432f42..63414c72198 100644
--- a/tests/bgp/conftest.py
+++ b/tests/bgp/conftest.py
@@ -7,11 +7,13 @@
import pytest
import random
+from jinja2 import Template
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.helpers.generators import generate_ips
from tests.common.helpers.parallel import parallel_run
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.utilities import wait_until
+from tests.common.utilities import wait_tcp_connection
from tests.common import config_reload
from bgp_helpers import define_config
from bgp_helpers import apply_default_bgp_config
@@ -19,6 +21,7 @@
from bgp_helpers import TEMPLATE_DIR
from bgp_helpers import BGP_PLAIN_TEMPLATE
from bgp_helpers import BGP_NO_EXPORT_TEMPLATE
+from bgp_helpers import DUMP_FILE, CUSTOM_DUMP_SCRIPT, CUSTOM_DUMP_SCRIPT_DEST, BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT
logger = logging.getLogger(__name__)
@@ -194,27 +197,44 @@ def _setup_interfaces_t0(mg_facts, peer_count):
def _setup_interfaces_t1(mg_facts, peer_count):
try:
connections = []
- ipv4_interfaces = [_ for _ in mg_facts["minigraph_interfaces"] if _is_ipv4_address(_['addr'])]
- used_subnets = [ipaddress.ip_network(_["subnet"]) for _ in ipv4_interfaces]
- subnet_prefixlen = used_subnets[0].prefixlen
- used_subnets = set(used_subnets)
- for pt in mg_facts["minigraph_portchannel_interfaces"]:
- if _is_ipv4_address(pt["addr"]):
- used_subnets.add(ipaddress.ip_network(pt["subnet"]))
+ ipv4_interfaces = []
+ used_subnets = set()
+ if mg_facts["minigraph_interfaces"]:
+ for intf in mg_facts["minigraph_interfaces"]:
+ if _is_ipv4_address(intf["addr"]):
+ ipv4_interfaces.append(intf["attachto"])
+ used_subnets.add(ipaddress.ip_network(intf["subnet"]))
+
+ ipv4_lag_interfaces = []
+ if mg_facts["minigraph_portchannel_interfaces"]:
+ for pt in mg_facts["minigraph_portchannel_interfaces"]:
+ if _is_ipv4_address(pt["addr"]):
+ pt_members = mg_facts["minigraph_portchannels"][pt["attachto"]]["members"]
+ # Only use LAG with 1 member for bgpmon session between PTF,
+ # It's because exabgp on PTF is bind to single interface
+ if len(pt_members) == 1:
+ ipv4_lag_interfaces.append(pt["attachto"])
+ used_subnets.add(ipaddress.ip_network(pt["subnet"]))
+
+ subnet_prefixlen = list(used_subnets)[0].prefixlen
_subnets = ipaddress.ip_network(u"10.0.0.0/24").subnets(new_prefix=subnet_prefixlen)
subnets = (_ for _ in _subnets if _ not in used_subnets)
- for intf, subnet in zip(random.sample(ipv4_interfaces, peer_count), subnets):
+ for intf, subnet in zip(random.sample(ipv4_interfaces + ipv4_lag_interfaces, peer_count), subnets):
conn = {}
local_addr, neighbor_addr = [_ for _ in subnet][:2]
- conn["local_intf"] = "%s" % intf["attachto"]
+ conn["local_intf"] = "%s" % intf
conn["local_addr"] = "%s/%s" % (local_addr, subnet_prefixlen)
conn["neighbor_addr"] = "%s/%s" % (neighbor_addr, subnet_prefixlen)
- conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][intf["attachto"]]
+ if intf.startswith("PortChannel"):
+ member_intf = mg_facts["minigraph_portchannels"][intf]["members"][0]
+ conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][member_intf]
+ else:
+ conn["neighbor_intf"] = "eth%s" % mg_facts["minigraph_port_indices"][intf]
connections.append(conn)
for conn in connections:
- # bind the ip to the interface and notify bgpcfgd
+ # bind the ip to the interface and notify bgpcfgd
duthost.shell("config interface ip add %s %s" % (conn["local_intf"], conn["local_addr"]))
ptfhost.shell("ifconfig %s %s" % (conn["neighbor_intf"], conn["neighbor_addr"]))
@@ -294,3 +314,47 @@ def backup_bgp_config(duthost):
except Exception:
config_reload(duthost)
apply_default_bgp_config(duthost)
+
+@pytest.fixture(scope="module")
+def bgpmon_setup_teardown(ptfhost, duthost, localhost, setup_interfaces):
+ connection = setup_interfaces[0]
+ dut_lo_addr = connection['local_addr'].split("/")[0]
+ peer_addr = connection['neighbor_addr'].split("/")[0]
+ mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
+ asn = mg_facts['minigraph_bgp_asn']
+ # TODO: Add a common method to load BGPMON config for test_bgpmon and test_traffic_shift
+ logger.info("Configuring bgp monitor session on DUT")
+ bgpmon_args = {
+ 'db_table_name': 'BGP_MONITORS',
+ 'peer_addr': peer_addr,
+ 'asn': asn,
+ 'local_addr': dut_lo_addr,
+ 'peer_name': BGP_MONITOR_NAME
+ }
+ bgpmon_template = Template(open(BGPMON_TEMPLATE_FILE).read())
+ duthost.copy(content=bgpmon_template.render(**bgpmon_args),
+ dest=BGPMON_CONFIG_FILE)
+ # Start bgpmon on DUT
+ logger.info("Starting bgpmon on DUT")
+ duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
+
+ logger.info("Starting bgp monitor session on PTF")
+ ptfhost.file(path=DUMP_FILE, state="absent")
+ ptfhost.copy(src=CUSTOM_DUMP_SCRIPT, dest=CUSTOM_DUMP_SCRIPT_DEST)
+ ptfhost.exabgp(name=BGP_MONITOR_NAME,
+ state="started",
+ local_ip=peer_addr,
+ router_id=peer_addr,
+ peer_ip=dut_lo_addr,
+ local_asn=asn,
+ peer_asn=asn,
+ port=BGP_MONITOR_PORT,
+ dump_script=CUSTOM_DUMP_SCRIPT_DEST)
+ pt_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT),
+ "Failed to start bgp monitor session on PTF")
+ yield
+ # Cleanup bgp monitor
+ duthost.shell("redis-cli -n 4 -c DEL 'BGP_MONITORS|{}'".format(peer_addr))
+ ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
+ ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
+ ptfhost.file(path=DUMP_FILE, state="absent")
diff --git a/tests/bgp/test_bgp_allow_list.py b/tests/bgp/test_bgp_allow_list.py
index 78f294d88cd..6193aefbecf 100644
--- a/tests/bgp/test_bgp_allow_list.py
+++ b/tests/bgp/test_bgp_allow_list.py
@@ -12,8 +12,10 @@
from jinja2 import Template
from natsort import natsorted
from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.helpers.parallel import parallel_run
+from bgp_helpers import verify_all_routes_announce_to_bgpmon
pytestmark = [
pytest.mark.topology('t1'),
@@ -92,8 +94,16 @@ def setup(tbinfo, nbrhosts, duthosts, rand_one_dut_hostname):
tor1_exabgp_port = EXABGP_BASE_PORT + tor1_offset
tor1_exabgp_port_v6 = EXABGP_BASE_PORT_V6 + tor1_offset
+ mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
+ tor1_namespace = DEFAULT_NAMESPACE
+ for dut_port, neigh in mg_facts['minigraph_neighbors'].items():
+ if tor1 == neigh['name'] and neigh['namespace']:
+ tor1_namespace = neigh['namespace']
+ break
+
setup_info = {
'tor1': tor1,
+ 'tor1_namespace': tor1_namespace,
'tor1_exabgp_port': tor1_exabgp_port,
'tor1_exabgp_port_v6': tor1_exabgp_port_v6,
'other_neighbors': other_neighbors,
@@ -122,23 +132,25 @@ def update_routes(action, ptfip, port, route):
@pytest.fixture
-def load_remove_allow_list(duthosts, rand_one_dut_hostname, request):
+def load_remove_allow_list(duthosts, setup, rand_one_dut_hostname, request):
duthost = duthosts[rand_one_dut_hostname]
allowed_list_prefixes = ALLOW_LIST['BGP_ALLOWED_PREFIXES']
for k,v in allowed_list_prefixes.items():
v['default_action'] = request.param
-
+
+ namespace = setup['tor1_namespace']
duthost.copy(content=json.dumps(ALLOW_LIST, indent=3), dest=ALLOW_LIST_PREFIX_JSON_FILE)
- duthost.shell('sonic-cfggen -j {} -w'.format(ALLOW_LIST_PREFIX_JSON_FILE))
+ duthost.shell('sonic-cfggen {} -j {} -w'.format('-n ' + namespace if namespace else '', ALLOW_LIST_PREFIX_JSON_FILE))
time.sleep(3)
yield request.param
- allow_list_keys = duthost.shell('redis-cli --raw -n 4 keys "BGP_ALLOWED_PREFIXES*"')['stdout_lines']
+ allow_list_keys = duthost.shell('sonic-db-cli {} CONFIG_DB keys "BGP_ALLOWED_PREFIXES*"'.format('-n ' + namespace if namespace else ''))['stdout_lines']
for key in allow_list_keys:
- duthost.shell('redis-cli -n 4 del "{}"'.format(key))
+ duthost.shell('sonic-db-cli {} CONFIG_DB del "{}"'.format('-n ' + namespace if namespace else '', key))
+
duthost.shell('rm -rf {}'.format(ALLOW_LIST_PREFIX_JSON_FILE))
@@ -209,10 +221,10 @@ def check_routes_on_tor1(self, setup, nbrhosts):
route_entries = tor1_route['vrfs']['default']['bgpRouteEntries']
pytest_assert(prefix in route_entries, 'Announced route {} not found on {}'.format(prefix, tor1))
- def check_routes_on_dut(self, duthost):
+ def check_routes_on_dut(self, duthost, namespace):
for prefixes in PREFIX_LISTS.values():
for prefix in prefixes:
- dut_route = duthost.get_route(prefix)
+ dut_route = duthost.get_route(prefix, namespace)
pytest_assert(dut_route, 'Route {} is not found on DUT'.format(prefix))
def check_results(self, results):
@@ -355,20 +367,24 @@ def check_other_neigh(nbrhosts, permit, node=None, results=None):
results = parallel_run(check_other_neigh, (nbrhosts, permit), {}, other_neighbors, timeout=180)
self.check_results(results)
- def test_default_allow_list_preconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts):
+ def test_default_allow_list_preconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown):
permit = True if DEFAULT_ACTION == "permit" else False
duthost = duthosts[rand_one_dut_hostname]
self.check_routes_on_tor1(setup, nbrhosts)
- self.check_routes_on_dut(duthost)
+ self.check_routes_on_dut(duthost, setup['tor1_namespace'])
self.check_routes_on_neighbors_empty_allow_list(nbrhosts, setup, permit)
-
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
+ "Not all routes are announced to bgpmon")
+
@pytest.mark.parametrize('load_remove_allow_list', ["permit", "deny"], indirect=['load_remove_allow_list'])
- def test_allow_list(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, load_remove_allow_list):
+ def test_allow_list(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, load_remove_allow_list, ptfhost, bgpmon_setup_teardown):
permit = True if load_remove_allow_list == "permit" else False
duthost = duthosts[rand_one_dut_hostname]
self.check_routes_on_tor1(setup, nbrhosts)
- self.check_routes_on_dut(duthost)
+ self.check_routes_on_dut(duthost, setup['tor1_namespace'])
self.check_routes_on_neighbors(nbrhosts, setup, permit)
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
+ "Not all routes are announced to bgpmon")
- def test_default_allow_list_postconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts):
- self.test_default_allow_list_preconfig(duthosts, rand_one_dut_hostname, setup, nbrhosts)
+ def test_default_allow_list_postconfig(self, duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown):
+ self.test_default_allow_list_preconfig(duthosts, rand_one_dut_hostname, setup, nbrhosts, ptfhost, bgpmon_setup_teardown)
diff --git a/tests/bgp/test_bgp_bbr.py b/tests/bgp/test_bgp_bbr.py
index 59da054a8c6..f53fcb20ae7 100644
--- a/tests/bgp/test_bgp_bbr.py
+++ b/tests/bgp/test_bgp_bbr.py
@@ -15,6 +15,7 @@
from jinja2 import Template
from natsort import natsorted
from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.constants import DEFAULT_NAMESPACE
from tests.common.helpers.parallel import reset_ansible_local_tmp
from tests.common.helpers.parallel import parallel_run
@@ -51,38 +52,38 @@ def bbr_default_state(setup):
return setup['bbr_default_state']
-def enable_bbr(duthost):
+def enable_bbr(duthost, namespace):
logger.info('Enable BGP_BBR')
- duthost.shell('sonic-cfggen -j /tmp/enable_bbr.json -w ')
+ duthost.shell('sonic-cfggen {} -j /tmp/enable_bbr.json -w '.format('-n ' + namespace if namespace else ''))
time.sleep(3)
-def disable_bbr(duthost):
+def disable_bbr(duthost, namespace):
logger.info('Disable BGP_BBR')
- duthost.shell('sonic-cfggen -j /tmp/disable_bbr.json -w')
+ duthost.shell('sonic-cfggen {} -j /tmp/disable_bbr.json -w'.format('-n ' + namespace if namespace else ''))
time.sleep(3)
@pytest.fixture
-def restore_bbr_default_state(duthosts, rand_one_dut_hostname, bbr_default_state):
+def restore_bbr_default_state(duthosts, setup, rand_one_dut_hostname, bbr_default_state):
yield
duthost = duthosts[rand_one_dut_hostname]
if bbr_default_state == 'enabled':
- enable_bbr(duthost)
+ enable_bbr(duthost, setup['tor1_namespace'])
else:
- disable_bbr(duthost)
+ disable_bbr(duthost, setup['tor1_namespace'])
@pytest.fixture
-def config_bbr_disabled(duthosts, rand_one_dut_hostname, restore_bbr_default_state):
+def config_bbr_disabled(duthosts, setup, rand_one_dut_hostname, restore_bbr_default_state):
duthost = duthosts[rand_one_dut_hostname]
- disable_bbr(duthost)
+ disable_bbr(duthost, setup['tor1_namespace'])
@pytest.fixture
-def config_bbr_enabled(duthosts, rand_one_dut_hostname, restore_bbr_default_state):
+def config_bbr_enabled(duthosts, setup, rand_one_dut_hostname, restore_bbr_default_state):
duthost = duthosts[rand_one_dut_hostname]
- enable_bbr(duthost)
+ enable_bbr(duthost, setup['tor1_namespace'])
@pytest.fixture(scope='module')
@@ -121,6 +122,12 @@ def setup(duthosts, rand_one_dut_hostname, tbinfo, nbrhosts):
else:
neigh_peer_map[name].update({'peer_addr_v6': peer_addr})
+ tor1_namespace = DEFAULT_NAMESPACE
+ for dut_port, neigh in mg_facts['minigraph_neighbors'].items():
+ if tor1 == neigh['name']:
+ tor1_namespace = neigh['namespace']
+ break
+
# Announce route to one of the T0 VM
tor1_offset = tbinfo['topo']['properties']['topology']['VMs'][tor1]['vm_offset']
tor1_exabgp_port = EXABGP_BASE_PORT + tor1_offset
@@ -144,6 +151,7 @@ def setup(duthosts, rand_one_dut_hostname, tbinfo, nbrhosts):
'tor1': tor1,
'other_vms': other_vms,
'tor1_offset': tor1_offset,
+ 'tor1_namespace': tor1_namespace,
'tor1_exabgp_port': tor1_exabgp_port,
'tor1_exabgp_port_v6': tor1_exabgp_port_v6,
'dut_asn': dut_asn,
@@ -218,7 +226,7 @@ def check_bbr_route_propagation(duthost, nbrhosts, setup, route, accepted=True):
# Check route on DUT
logger.info('Check route on DUT')
- dut_route = duthost.get_route(route.prefix)
+ dut_route = duthost.get_route(route.prefix, setup['tor1_namespace'])
if accepted:
pytest_assert(dut_route, 'No route for {} found on DUT'.format(route.prefix))
dut_route_aspath = dut_route['paths'][0]['aspath']['string']
diff --git a/tests/bgp/test_bgp_update_timer.py b/tests/bgp/test_bgp_update_timer.py
index ab6ef41e306..1eb67d2355b 100644
--- a/tests/bgp/test_bgp_update_timer.py
+++ b/tests/bgp/test_bgp_update_timer.py
@@ -48,7 +48,7 @@ class BGPNeighbor(object):
def __init__(self, duthost, ptfhost, name,
neighbor_ip, neighbor_asn,
- dut_ip, dut_asn, port, is_quagga=False):
+ dut_ip, dut_asn, port, neigh_type, is_quagga=False):
self.duthost = duthost
self.ptfhost = ptfhost
self.ptfip = ptfhost.mgmt_ip
@@ -58,6 +58,7 @@ def __init__(self, duthost, ptfhost, name,
self.peer_ip = dut_ip
self.peer_asn = dut_asn
self.port = port
+ self.type = neigh_type
self.is_quagga = is_quagga
def start_session(self):
@@ -84,7 +85,7 @@ def start_session(self):
neighbor_lo_addr=self.ip,
neighbor_mgmt_addr=self.ip,
neighbor_hwsku=None,
- neighbor_type="ToRRouter"
+ neighbor_type=self.type
)
_write_variable_from_j2_to_configdb(
@@ -166,6 +167,17 @@ def common_setup_teardown(duthost, is_quagga, ptfhost, setup_interfaces):
mg_facts = duthost.minigraph_facts(host=duthost.hostname)["ansible_facts"]
conn0, conn1 = setup_interfaces
dut_asn = mg_facts["minigraph_bgp_asn"]
+
+ dut_type = ''
+ for k,v in mg_facts['minigraph_devices'].iteritems():
+ if k == duthost.hostname:
+ dut_type = v['type']
+
+ if dut_type == 'ToRRouter':
+ neigh_type = 'LeafRouter'
+ else:
+ neigh_type = 'ToRRouter'
+
bgp_neighbors = (
BGPNeighbor(
duthost,
@@ -176,6 +188,7 @@ def common_setup_teardown(duthost, is_quagga, ptfhost, setup_interfaces):
conn0["local_addr"].split("/")[0],
dut_asn,
NEIGHBOR_PORT0,
+ neigh_type,
is_quagga=is_quagga
),
BGPNeighbor(
@@ -187,6 +200,7 @@ def common_setup_teardown(duthost, is_quagga, ptfhost, setup_interfaces):
conn1["local_addr"].split("/")[0],
dut_asn,
NEIGHBOR_PORT1,
+ neigh_type,
is_quagga=is_quagga
)
)
diff --git a/tests/bgp/test_bgpmon.py b/tests/bgp/test_bgpmon.py
index 4caa569d85e..8741705b955 100644
--- a/tests/bgp/test_bgpmon.py
+++ b/tests/bgp/test_bgpmon.py
@@ -8,40 +8,19 @@
import json
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import]
-from tests.common.helpers.generators import generate_ips as generate_ips
+from tests.common.helpers.generators import generate_ip_through_default_route
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
-
+from bgp_helpers import BGPMON_TEMPLATE_FILE, BGPMON_CONFIG_FILE, BGP_MONITOR_NAME, BGP_MONITOR_PORT
pytestmark = [
pytest.mark.topology('any'),
]
-BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
-BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
BGP_PORT = 179
BGP_CONNECT_TIMEOUT = 121
ZERO_ADDR = r'0.0.0.0/0'
-BGP_MONITOR_NAME = "bgp_monitor"
-BGP_MONITOR_PORT = 7000
logger = logging.getLogger(__name__)
-def route_through_default_routes(host, ip_addr):
- output = host.shell("show ip route {} json".format(ip_addr))['stdout']
- routes_info = json.loads(output)
- ret = True
- for prefix in routes_info.keys():
- if prefix != ZERO_ADDR:
- ret = False
- break
- return ret
-
-def generate_ip_through_default_route(host):
- # Generate an IP address routed through default routes
- for leading in range(11, 255):
- ip_addr = generate_ips(1, "{}.0.0.1/24".format(leading), [])[0]
- if route_through_default_routes(host, ip_addr):
- return ip_addr
- return None
def get_default_route_ports(host):
mg_facts = host.minigraph_facts(host=host.hostname)['ansible_facts']
diff --git a/tests/bgp/test_traffic_shift.py b/tests/bgp/test_traffic_shift.py
index aaf9dcc94cb..ac1a0036864 100644
--- a/tests/bgp/test_traffic_shift.py
+++ b/tests/bgp/test_traffic_shift.py
@@ -1,11 +1,8 @@
import pytest
import logging
-import json
-import time
import ipaddr as ipaddress
+from bgp_helpers import parse_rib, verify_all_routes_announce_to_bgpmon
from tests.common.helpers.assertions import pytest_assert
-from tests.common.utilities import wait_tcp_connection
-from jinja2 import Template
import re
pytestmark = [
@@ -18,65 +15,11 @@
TS_MAINTENANCE = "System Mode: Maintenance"
TS_INCONSISTENT = "System Mode: Not consistent"
-DUMP_FILE = "/tmp/bgp_monitor_dump.log"
-CUSTOM_DUMP_SCRIPT = "bgp/bgp_monitor_dump.py"
-CUSTOM_DUMP_SCRIPT_DEST = "/usr/share/exabgp/bgp_monitor_dump.py"
-BGP_MONITOR_PORT = 7000
-BGP_MONITOR_NAME = "bgp_monitor"
-BGP_ANNOUNCE_TIME = 30 #should be enough to receive and parse bgp updates
-
-# TODO: remove me
-BGPMON_TEMPLATE_FILE = 'bgp/templates/bgp_template.j2'
-BGPMON_CONFIG_FILE = '/tmp/bgpmon.json'
-
-PEER_COUNT = 1
-
@pytest.fixture
def traffic_shift_community(duthost):
community = duthost.shell('sonic-cfggen -y /etc/sonic/constants.yml -v constants.bgp.traffic_shift_community')['stdout']
return community
-@pytest.fixture
-def common_setup_teardown(ptfhost, duthost, localhost, setup_interfaces):
- connection = setup_interfaces[0]
- dut_lo_addr = connection['local_addr'].split("/")[0]
- peer_addr = connection['neighbor_addr'].split("/")[0]
- mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
- asn = mg_facts['minigraph_bgp_asn']
- # TODO: Add a common method to load BGPMON config for test_bgpmon and test_traffic_shift
- logger.info("Configuring bgp monitor session on DUT")
- bgpmon_args = {
- 'db_table_name': 'BGP_MONITORS',
- 'peer_addr': peer_addr,
- 'asn': asn,
- 'local_addr': dut_lo_addr,
- 'peer_name': BGP_MONITOR_NAME
- }
- bgpmon_template = Template(open(BGPMON_TEMPLATE_FILE).read())
- duthost.copy(content=bgpmon_template.render(**bgpmon_args),
- dest=BGPMON_CONFIG_FILE)
-
- logger.info("Starting bgp monitor session on PTF")
- ptfhost.file(path=DUMP_FILE, state="absent")
- ptfhost.copy(src=CUSTOM_DUMP_SCRIPT, dest=CUSTOM_DUMP_SCRIPT_DEST)
- ptfhost.exabgp(name=BGP_MONITOR_NAME,
- state="started",
- local_ip=peer_addr,
- router_id=peer_addr,
- peer_ip=dut_lo_addr,
- local_asn=asn,
- peer_asn=asn,
- port=BGP_MONITOR_PORT,
- dump_script=CUSTOM_DUMP_SCRIPT_DEST)
- pytest_assert(wait_tcp_connection(localhost, ptfhost.mgmt_ip, BGP_MONITOR_PORT),
- "Failed to start bgp monitor session on PTF")
- yield
- # Cleanup bgp monitor
- duthost.shell("redis-cli -n 4 -c DEL 'BGP_MONITORS|{}'".format(peer_addr))
- ptfhost.exabgp(name=BGP_MONITOR_NAME, state="absent")
- ptfhost.file(path=CUSTOM_DUMP_SCRIPT_DEST, state="absent")
- ptfhost.file(path=DUMP_FILE, state="absent")
-
def get_traffic_shift_state(host):
outputs = host.shell('TSC')['stdout_lines']
for out in outputs:
@@ -88,37 +31,6 @@ def get_traffic_shift_state(host):
return TS_INCONSISTENT
pytest.fail("TSC return unexpected state {}".format(out))
-def parse_exabgp_dump(host):
- """
- Parse the dump file of exabgp, and build a set for checking routes
- """
- routes = set()
- output_lines = host.shell("cat {}".format(DUMP_FILE))['stdout_lines']
- for line in output_lines:
- routes.add(line)
- return routes
-
-def parse_rib(host, ip_ver):
- """
- Parse output of 'show bgp ipv4/6' and parse into a dict for checking routes
- """
- routes = {}
- cmd = "vtysh -c \"show bgp ipv%d json\"" % ip_ver
- route_data = json.loads(host.shell(cmd)['stdout'])
- for ip, nexthops in route_data['routes'].iteritems():
- aspath = set()
- for nexthop in nexthops:
- aspath.add(nexthop['path'])
- routes[ip] = aspath
- return routes
-
-def verify_all_routes_announce_to_bgpmon(routes_bgpmon, routes_dut):
- logger.info("Verifying all routes are announced to BGPMON")
- for route in routes_dut.keys():
- if route not in routes_bgpmon:
- return False
- return True
-
def parse_routes_on_eos(dut_host, neigh_hosts, ip_ver):
"""
Parse the output of 'show ip bgp neigh received-routes' on eos, and store in a dict
@@ -225,7 +137,7 @@ def verify_only_loopback_routes_are_announced_to_neighs(dut_host, neigh_hosts, c
return verify_loopback_route_with_community(dut_host, neigh_hosts, 4, community) and \
verify_loopback_route_with_community(dut_host, neigh_hosts, 6, community)
-def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_community):
+def test_TSA(duthost, ptfhost, nbrhosts, bgpmon_setup_teardown, traffic_shift_community):
"""
Test TSA
Verify all routes are announced to bgp monitor, and only loopback routes are announced to neighs
@@ -236,14 +148,7 @@ def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_co
# Verify DUT is in maintenance state.
pytest_assert(TS_MAINTENANCE == get_traffic_shift_state(duthost),
"DUT is not in maintenance state")
- # Start bgpmon on DUT
- logger.info("Starting bgpmon on DUT")
- duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
- time.sleep(BGP_ANNOUNCE_TIME)
- bgpmon_routes = parse_exabgp_dump(ptfhost)
- rib_v4 = parse_rib(duthost, 4)
- rib_v6 = parse_rib(duthost, 6)
- pytest_assert(verify_all_routes_announce_to_bgpmon(bgpmon_routes, dict(rib_v4.items() + rib_v6.items())),
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
"Not all routes are announced to bgpmon")
pytest_assert(verify_only_loopback_routes_are_announced_to_neighs(duthost, nbrhosts, traffic_shift_community),
"Failed to verify routes on eos in TSA")
@@ -251,7 +156,7 @@ def test_TSA(duthost, ptfhost, nbrhosts, common_setup_teardown, traffic_shift_co
# Recover to Normal state
duthost.shell("TSB")
-def test_TSB(duthost, ptfhost, nbrhosts, common_setup_teardown):
+def test_TSB(duthost, ptfhost, nbrhosts, bgpmon_setup_teardown):
"""
Test TSB.
Establish BGP session between PTF and DUT, and verify all routes are announced to bgp monitor,
@@ -262,16 +167,9 @@ def test_TSB(duthost, ptfhost, nbrhosts, common_setup_teardown):
# Verify DUT is in normal state.
pytest_assert(TS_NORMAL == get_traffic_shift_state(duthost),
"DUT is not in normal state")
- # Start bgpmon on DUT
- logger.info("Starting bgpmon on DUT")
- duthost.command("sonic-cfggen -j {} -w".format(BGPMON_CONFIG_FILE))
- time.sleep(BGP_ANNOUNCE_TIME)
- bgpmon_routes = parse_exabgp_dump(ptfhost)
- rib_v4 = parse_rib(duthost, 4)
- rib_v6 = parse_rib(duthost, 6)
- pytest_assert(verify_all_routes_announce_to_bgpmon(bgpmon_routes, dict(rib_v4.items() + rib_v6.items())),
+ pytest_assert(verify_all_routes_announce_to_bgpmon(duthost, ptfhost),
"Not all routes are announced to bgpmon")
- pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, rib_v4, 4),
+ pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, parse_rib(duthost, 4), 4),
"Not all ipv4 routes are announced to neighbors")
- pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, rib_v6, 6),
+ pytest_assert(verify_all_routes_announce_to_neighs(duthost, nbrhosts, parse_rib(duthost, 6), 6),
"Not all ipv6 routes are announced to neighbors")
diff --git a/tests/cacl/test_cacl_application.py b/tests/cacl/test_cacl_application.py
index 56be5855e10..03d99c4e5da 100644
--- a/tests/cacl/test_cacl_application.py
+++ b/tests/cacl/test_cacl_application.py
@@ -1,4 +1,5 @@
import ipaddress
+import json
import pytest
@@ -11,21 +12,42 @@
pytest.mark.topology('any')
]
+@pytest.fixture(scope="module")
+def docker_network(duthost):
+
+ output = duthost.command("docker inspect bridge")
+
+ docker_containers_info = json.loads(output['stdout'])[0]['Containers']
+ ipam_info = json.loads(output['stdout'])[0]['IPAM']
+
+ docker_network = {}
+ docker_network['bridge'] = {'IPv4Address' : ipam_info['Config'][0]['Gateway'],
+ 'IPv6Address' : ipam_info['Config'][1]['Gateway'] }
+
+ docker_network['container'] = {}
+ for k,v in docker_containers_info.items():
+ docker_network['container'][v['Name']] = {'IPv4Address' : v['IPv4Address'].split('/')[0], 'IPv6Address' : v['IPv6Address'].split('/')[0]}
+
+ return docker_network
+
# To specify a port range instead of a single port, use iptables format:
# separate start and end ports with a colon, e.g., "1000:2000"
ACL_SERVICES = {
"NTP": {
"ip_protocols": ["udp"],
- "dst_ports": ["123"]
+ "dst_ports": ["123"],
+ "multi_asic_ns_to_host_fwd": False
},
"SNMP": {
"ip_protocols": ["tcp", "udp"],
- "dst_ports": ["161"]
+ "dst_ports": ["161"],
+ "multi_asic_ns_to_host_fwd": True
},
"SSH": {
"ip_protocols": ["tcp"],
- "dst_ports": ["22"]
+ "dst_ports": ["22"],
+ "multi_asic_ns_to_host_fwd": True
}
}
@@ -129,7 +151,7 @@ def get_cacl_tables_and_rules(duthost):
return cacl_tables
-def generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6tables_rules):
+def generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6tables_rules, asic_index):
INTERFACE_TABLE_NAME_LIST = [
"LOOPBACK_INTERFACE",
"MGMT_INTERFACE",
@@ -139,8 +161,8 @@ def generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6ta
]
# Gather device configuration facts
- cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")["ansible_facts"]
-
+ namespace = duthost.get_namespace_from_asic_id(asic_index)
+ cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent", namespace=namespace)["ansible_facts"]
# Add iptables/ip6tables rules to drop all packets destined for peer-to-peer interface IP addresses
for iface_table_name in INTERFACE_TABLE_NAME_LIST:
if iface_table_name in cfg_facts:
@@ -161,7 +183,7 @@ def generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6ta
pytest.fail("Unrecognized IP address type on interface '{}': {}".format(iface_name, ip_ntwrk))
-def generate_expected_rules(duthost):
+def generate_expected_rules(duthost, docker_network, asic_index):
iptables_rules = []
ip6tables_rules = []
@@ -177,6 +199,26 @@ def generate_expected_rules(duthost):
iptables_rules.append("-A INPUT -s 127.0.0.1/32 -i lo -j ACCEPT")
ip6tables_rules.append("-A INPUT -s ::1/128 -i lo -j ACCEPT")
+ if asic_index is None:
+ # Allow Communication among docker containers
+ for k, v in docker_network['container'].items():
+ iptables_rules.append("-A INPUT -s {}/32 -d {}/32 -j ACCEPT".format(docker_network['bridge']['IPv4Address'], docker_network['bridge']['IPv4Address']))
+ iptables_rules.append("-A INPUT -s {}/32 -d {}/32 -j ACCEPT".format(v['IPv4Address'], docker_network['bridge']['IPv4Address']))
+ ip6tables_rules.append("-A INPUT -s {}/128 -d {}/128 -j ACCEPT".format(docker_network['bridge']['IPv6Address'], docker_network['bridge']['IPv6Address']))
+ ip6tables_rules.append("-A INPUT -s {}/128 -d {}/128 -j ACCEPT".format(v['IPv6Address'], docker_network['bridge']['IPv6Address']))
+
+ else:
+ iptables_rules.append("-A INPUT -s {}/32 -d {}/32 -j ACCEPT".format(docker_network['container']['database' + str(asic_index)]['IPv4Address'],
+ docker_network['container']['database' + str(asic_index)]['IPv4Address']))
+ iptables_rules.append("-A INPUT -s {}/32 -d {}/32 -j ACCEPT".format(docker_network['bridge']['IPv4Address'],
+ docker_network['container']['database' + str(asic_index)]['IPv4Address']))
+ ip6tables_rules.append("-A INPUT -s {}/128 -d {}/128 -j ACCEPT".format(docker_network['container']['database' + str(asic_index)]['IPv6Address'],
+ docker_network['container']['database' + str(asic_index)]['IPv6Address']))
+ ip6tables_rules.append("-A INPUT -s {}/128 -d {}/128 -j ACCEPT".format(docker_network['bridge']['IPv6Address'],
+ docker_network['container']['database' + str(asic_index)]['IPv6Address']))
+
+
+
# Allow all incoming packets from established connections or new connections
# which are related to established connections
iptables_rules.append("-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT")
@@ -299,7 +341,7 @@ def generate_expected_rules(duthost):
rules_applied_from_config += 1
# Append rules which block "ip2me" traffic on p2p interfaces
- generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6tables_rules)
+ generate_and_append_block_ip2me_traffic_rules(duthost, iptables_rules, ip6tables_rules, asic_index)
# Allow all packets with a TTL/hop limit of 0 or 1
iptables_rules.append("-A INPUT -m ttl --ttl-lt 2 -j ACCEPT")
@@ -313,19 +355,55 @@ def generate_expected_rules(duthost):
return iptables_rules, ip6tables_rules
+def generate_nat_expected_rules(duthost, docker_network, asic_index):
+ iptables_natrules = []
+ ip6tables_natrules = []
-def test_cacl_application(duthosts, rand_one_dut_hostname, localhost, creds):
- """
- Test case to ensure caclmgrd is applying control plane ACLs properly
-
- This is done by generating our own set of expected iptables and ip6tables
- rules based on the DuT's configuration and comparing them against the
- actual iptables/ip6tables rules on the DuT.
- """
- duthost = duthosts[rand_one_dut_hostname]
- expected_iptables_rules, expected_ip6tables_rules = generate_expected_rules(duthost)
-
- stdout = duthost.shell("sudo iptables -S")["stdout"]
+ # Default policies
+ iptables_natrules.append("-P PREROUTING ACCEPT")
+ iptables_natrules.append("-P INPUT ACCEPT")
+ iptables_natrules.append("-P OUTPUT ACCEPT")
+ iptables_natrules.append("-P POSTROUTING ACCEPT")
+ ip6tables_natrules.append("-P PREROUTING ACCEPT")
+ ip6tables_natrules.append("-P INPUT ACCEPT")
+ ip6tables_natrules.append("-P OUTPUT ACCEPT")
+ ip6tables_natrules.append("-P POSTROUTING ACCEPT")
+
+
+ for acl_service in ACL_SERVICES:
+ if ACL_SERVICES[acl_service]["multi_asic_ns_to_host_fwd"]:
+ for ip_protocol in ACL_SERVICES[acl_service]["ip_protocols"]:
+ for dst_port in ACL_SERVICES[acl_service]["dst_ports"]:
+ # IPv4 rules
+ iptables_natrules.append(
+ "-A PREROUTING -p {} -m {} --dport {} -j DNAT --to-destination {}".format
+ (ip_protocol, ip_protocol, dst_port,
+ docker_network['bridge']['IPv4Address']))
+
+ iptables_natrules.append(
+ "-A POSTROUTING -p {} -m {} --dport {} -j SNAT --to-source {}".format
+ (ip_protocol, ip_protocol, dst_port,
+ docker_network['container']['database' + str(asic_index)]['IPv4Address']))
+
+ # IPv6 rules
+ ip6tables_natrules.append(
+ "-A PREROUTING -p {} -m {} --dport {} -j DNAT --to-destination {}".format
+ (ip_protocol, ip_protocol, dst_port,
+ docker_network['bridge']['IPv6Address']))
+
+ ip6tables_natrules.append(
+ "-A POSTROUTING -p {} -m {} --dport {} -j SNAT --to-source {}".format
+ (ip_protocol,ip_protocol, dst_port,
+ docker_network['container']['database' + str(asic_index)]['IPv6Address']))
+
+ return iptables_natrules, ip6tables_natrules
+
+
+def verify_cacl(duthost, localhost, creds, docker_network, asic_index = None):
+ expected_iptables_rules, expected_ip6tables_rules = generate_expected_rules(duthost, docker_network, asic_index)
+
+
+ stdout = duthost.get_asic_or_sonic_host(asic_index).command("iptables -S")["stdout"]
actual_iptables_rules = stdout.strip().split("\n")
# Ensure all expected iptables rules are present on the DuT
@@ -344,7 +422,7 @@ def test_cacl_application(duthosts, rand_one_dut_hostname, localhost, creds):
#for i in range(len(expected_iptables_rules)):
# pytest_assert(actual_iptables_rules[i] == expected_iptables_rules[i], "iptables rules not in expected order")
- stdout = duthost.shell("sudo ip6tables -S")["stdout"]
+ stdout = duthost.get_asic_or_sonic_host(asic_index).command("ip6tables -S")["stdout"]
actual_ip6tables_rules = stdout.strip().split("\n")
# Ensure all expected ip6tables rules are present on the DuT
@@ -362,3 +440,48 @@ def test_cacl_application(duthosts, rand_one_dut_hostname, localhost, creds):
# Ensure the ip6tables rules are applied in the correct order
#for i in range(len(expected_ip6tables_rules)):
# pytest_assert(actual_ip6tables_rules[i] == expected_ip6tables_rules[i], "ip6tables rules not in expected order")
+
+def verify_nat_cacl(duthost, localhost, creds, docker_network, asic_index):
+ expected_iptables_rules, expected_ip6tables_rules = generate_nat_expected_rules(duthost, docker_network, asic_index)
+
+ stdout = duthost.get_asic_or_sonic_host(asic_index).command("iptables -t nat -S")["stdout"]
+ actual_iptables_rules = stdout.strip().split("\n")
+
+ # Ensure all expected iptables rules are present on the DuT
+ missing_iptables_rules = set(expected_iptables_rules) - set(actual_iptables_rules)
+ pytest_assert(len(missing_iptables_rules) == 0, "Missing expected iptables nat rules: {}".format(repr(missing_iptables_rules)))
+
+ # Ensure there are no unexpected iptables rules present on the DuT
+ unexpected_iptables_rules = set(actual_iptables_rules) - set(expected_iptables_rules)
+ pytest_assert(len(unexpected_iptables_rules) == 0, "Unexpected iptables nat rules: {}".format(repr(unexpected_iptables_rules)))
+
+ stdout = duthost.get_asic_or_sonic_host(asic_index).command("ip6tables -t nat -S")["stdout"]
+ actual_ip6tables_rules = stdout.strip().split("\n")
+
+ # Ensure all expected ip6tables rules are present on the DuT
+ missing_ip6tables_rules = set(expected_ip6tables_rules) - set(actual_ip6tables_rules)
+ pytest_assert(len(missing_ip6tables_rules) == 0, "Missing expected ip6tables nat rules: {}".format(repr(missing_ip6tables_rules)))
+
+ # Ensure there are no unexpected ip6tables rules present on the DuT
+ unexpected_ip6tables_rules = set(actual_ip6tables_rules) - set(expected_ip6tables_rules)
+ pytest_assert(len(unexpected_ip6tables_rules) == 0, "Unexpected ip6tables nat rules: {}".format(repr(unexpected_ip6tables_rules)))
+
+def test_cacl_application(duthosts, rand_one_dut_hostname, localhost, creds, docker_network):
+ """
+ Test case to ensure caclmgrd is applying control plane ACLs properly
+
+ This is done by generating our own set of expected iptables and ip6tables
+ rules based on the DuT's configuration and comparing them against the
+ actual iptables/ip6tables rules on the DuT.
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ verify_cacl(duthost, localhost, creds, docker_network)
+
+def test_multiasic_cacl_application(duthosts, rand_one_dut_hostname, localhost, creds,docker_network, enum_frontend_asic_index):
+
+ if enum_frontend_asic_index is None:
+ pytest.skip("Not Multi-asic platform. Skipping !!")
+
+ duthost = duthosts[rand_one_dut_hostname]
+ verify_cacl(duthost, localhost, creds, docker_network, enum_frontend_asic_index)
+ verify_nat_cacl(duthost, localhost, creds, docker_network, enum_frontend_asic_index)
diff --git a/tests/common/cache/facts_cache.md b/tests/common/cache/facts_cache.md
index a649514128b..cb887b58e05 100644
--- a/tests/common/cache/facts_cache.md
+++ b/tests/common/cache/facts_cache.md
@@ -35,17 +35,110 @@ When `testbed-cli.sh deploy-mg` is executed for specified testbed, the ansible p
There are two ways to use the cache function.
## Use decorator `facts_cache.py::cached`
+facts_cache.**cache**(*name, zone_getter=None, after_read=None, before_write=None*)
+* This function is a decorator that can be used to cache the result from the decorated function.
+ * arguments:
+ * `name`: the key name that result from the decorated function will be stored under.
+ * `zone_getter`: a function used to find a string that could be used as `zone`, must have three arguments defined: `(function, func_args, func_kargs)`, that `function` is the decorated function, `func_args` and `func_kargs` are those parameters passed the decorated function at runtime.
+ * `after_read`: a hook function used to process the cached facts after reading from cached file, must have four arguments defined: `(facts, function, func_args, func_kargs)`, `facts` is the just-read cached facts, `function`, `func_args` and `func_kargs` are the same as those in `zone_getter`.
+ * `before_write`: a hook function used to process the facts returned from decorated function, also must have four arguments defined: `(facts, function, func_args, func_kargs)`.
+
+### usage
+1. default usage to decorate methods in class `AnsibleHostBase` or its derivatives.
+```python
+from tests.common.cache import cached
+
+class SonicHost(AnsibleHostBase):
+
+ ...
+
+ @cached(name='basic_facts')
+ def _gather_facts(self):
+ ...
```
-from tests.common.cache import cached
+2. have a custom zone getter function to retrieve the zone from the argument `hostname` defined in the decorated function.
+```python
+import inspect
+
+
+def get_hostname(function, func_args, func_kargs):
+ args_binding = inspect.getcallargs(function, *func_args, **func_kargs)
+ return args_binding.get("hostname") or args_binding.get("kargs").get("hostname")
+
+
+@cached(name="host_variable", zone_getter=get_hostname)
+def get_host_visible_variable(inv_files, hostname):
+ pass
+```
+3. have custom `after_read` and `before_write` to validate that cached facts are within 24h.
+```python
+import datetime
+import time
+
+
+def validate_datetime_after_read(facts, function, func_args, func_kargs):
+    if facts is not FactsCache.NOTEXIST:
+ timestamp = facts.get("cached_timestamp")
+ if timestamp:
+ delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(timestamp)
+ if delta.days == 0:
+ return facts["cached_facts"]
+    # if it exceeds 24h, force getting the result by calling the decorated function
+ return FactsCache.NOTEXIST
+
+
+def add_datetime_before_write(facts, function, func_args, func_kargs):
+ return {"cached_timestamp": time.time(), "cached_facts": facts}
+
class SonicHost(AnsibleHostBase):
-...
+ ...
@cached(name='basic_facts')
def _gather_facts(self):
-...
+```
+2. have a custom zone getter function to retrieve the zone from the argument `hostname` defined in the decorated function.
+```python
+import inspect
+
+
+def get_hostname(function, func_args, func_kargs):
+ args_binding = inspect.getcallargs(function, *func_args, **func_kargs)
+ return args_binding.get("hostname") or args_binding.get("kargs").get("hostname")
+
+
+@cached(name="host_variable", zone_getter=get_hostname)
+def get_host_visible_variable(inv_files, hostname):
+ pass
+```
+3. have custom `after_read` and `before_write` to validate that cached facts are within 24h.
+```python
+import datetime
+import time
+
+
+def validate_datetime_after_read(facts, function, func_args, func_kargs):
+ timestamp = facts.get("cached_timestamp")
+ if timestamp:
+ delta = datetime.datetime.now() - datetime.datetime.fromtimestamp(timestamp)
+ if delta.days == 0:
+ return facts["cached_facts"]
+    # if it exceeds 24h, force getting the result by calling the decorated function
+ return FactsCache.NOTEXIST
+
+
+def add_datetime_before_write(facts, function, func_args, func_kargs):
+ return {"cached_timestamp": time.time(), "cached_facts": facts}
+
+
+class SonicHost(AnsibleHostBase):
+
+ ...
+
+ @cached(name='basic_facts', after_read=validate_datetime_after_read, before_write=add_datetime_before_write)
+ def _gather_facts(self):
```
The `cached` decorator supports name argument which correspond to the `key` argument of `read(self, zone, key)` and `write(self, zone, key, value)`.
@@ -55,7 +148,7 @@ The `cached` decorator can only be used on an bound method of class which is sub
* Import FactsCache and grab the cache instance
-```
+```python
from tests.common.cache import FactsCache
cache = FactsCache()
@@ -63,7 +156,7 @@ cache = FactsCache()
* Use code like below
-```
+```python
def get_some_facts(self, *args):
cached_facts = cache.read(self.hostname, 'some_facts')
@@ -78,7 +171,7 @@ def get_some_facts(self, *args):
```
* Another example
-```
+```python
def get_something():
info = cache.read('common', 'some_info')
if info:
diff --git a/tests/common/cache/facts_cache.py b/tests/common/cache/facts_cache.py
index f5773a47cb9..01f7d4a743e 100644
--- a/tests/common/cache/facts_cache.py
+++ b/tests/common/cache/facts_cache.py
@@ -8,9 +8,9 @@
from collections import defaultdict
from threading import Lock
-
from six import with_metaclass
+
logger = logging.getLogger(__name__)
CURRENT_PATH = os.path.realpath(__file__)
@@ -41,6 +41,9 @@ class FactsCache(with_metaclass(Singleton, object)):
Args:
with_metaclass ([function]): Python 2&3 compatible function from the six library for adding metaclass.
"""
+
+ NOTEXIST = object()
+
def __init__(self, cache_location=CACHE_LOCATION):
self._cache_location = os.path.abspath(cache_location)
self._cache = defaultdict(dict)
@@ -87,7 +90,7 @@ def read(self, zone, key):
except (IOError, ValueError) as e:
logger.info('Load cache file "{}" failed with exception: {}'\
.format(os.path.abspath(facts_file), repr(e)))
- return None
+ return self.NOTEXIST
def write(self, zone, key, value):
"""Store facts to cache.
@@ -158,31 +161,57 @@ def cleanup(self, zone=None, key=None):
logger.error('Remove cache folder "{}" failed with exception: {}'\
.format(self._cache_location, repr(e)))
-def cached(name):
+
+def _get_hostname_as_zone(function, func_args, func_kargs):
+ """Default zone getter used for decorator cached."""
+ hostname = None
+ if func_args:
+ hostname = getattr(func_args[0], "hostname", None)
+ if not hostname or not isinstance(hostname, str):
+ raise ValueError("Failed to get attribute 'hostname' of type string from instance of type %s."
+ % type(func_args[0]))
+ return hostname
+
+
+def cached(name, zone_getter=None, after_read=None, before_write=None):
"""Decorator for enabling cache for facts.
The cached facts are to be stored by .pickle. Because the cached pickle files must be stored under subfolder
- specified by zone, this decorator can only be used for bound method of class which is subclass of AnsibleHostBase.
- The classes have attribute 'hostname' that can be used as zone.
+    specified by zone, the decorator has an option to be passed a zone getter function used to get the zone. The zone getter
+ function must have signature of '(function, func_args, func_kargs)' that 'function' is the decorated function,
+ 'func_args' and 'func_kargs' are the parameters passed to the decorated function at runtime. The zone getter function
+ should raise an error if it fails to return a string as zone.
+    With the default zone getter function, this decorator tries to find the zone:
+ if the function is a bound method of class AnsibleHostBase and its derivatives, it will try to use its
+    attribute 'hostname' as zone, or raises an error if 'hostname' doesn't exist or is not a string.
Args:
name ([str]): Name of the cached facts.
-
+ zone_getter ([function]): Function used to get hostname used as zone.
+ after_read ([function]): Hook function used to process facts after read from cache.
+ before_write ([function]): Hook function used to process facts before write into cache.
Returns:
[function]: Decorator function.
"""
cache = FactsCache()
+
def decorator(target):
- def wrapper(*args, **kwargs):
- hostname = getattr(args[0], 'hostname', None)
- if not hostname or not isinstance(hostname, str):
- raise Exception('Decorator is only applicable to bound method of class AnsibleHostBase and its sub-classes')
- cached_facts = cache.read(hostname, name)
- if cached_facts:
+ def wrapper(*args, **kargs):
+ _zone_getter = zone_getter or _get_hostname_as_zone
+ zone = _zone_getter(target, args, kargs)
+
+ cached_facts = cache.read(zone, name)
+ if after_read:
+ cached_facts = after_read(cached_facts, target, args, kargs)
+ if cached_facts is not FactsCache.NOTEXIST:
return cached_facts
else:
- facts = target(*args, **kwargs)
- cache.write(hostname, name, facts)
+ facts = target(*args, **kargs)
+ if before_write:
+ _facts = before_write(facts, target, args, kargs)
+ cache.write(zone, name, _facts)
+ else:
+ cache.write(zone, name, facts)
return facts
return wrapper
return decorator
diff --git a/tests/common/connections/__init__.py b/tests/common/connections/__init__.py
index d444f90d9f6..e69de29bb2d 100644
--- a/tests/common/connections/__init__.py
+++ b/tests/common/connections/__init__.py
@@ -1,34 +0,0 @@
-from tests.common.connections.base_console_conn import CONSOLE_SSH, CONSOLE_SSH_MENU_PORTS, CONSOLE_TELNET
-from telnet_console_conn import TelnetConsoleConn
-from ssh_console_conn import SSHConsoleConn
-
-__all__ = ["TelnetConsoleConn", "SSHConsoleConn"]
-
-ConsoleTypeMapper = {
- CONSOLE_TELNET: TelnetConsoleConn,
- CONSOLE_SSH: SSHConsoleConn,
- CONSOLE_SSH_MENU_PORTS: SSHConsoleConn
-}
-
-def ConsoleHost(console_type,
- console_host,
- console_port,
- sonic_username,
- sonic_password,
- console_username=None,
- console_password=None,
- timeout_s=100):
- if not ConsoleTypeMapper.has_key(console_type):
- raise ValueError("console type {} is not supported yet".format(console_type))
- params = {
- "console_host": console_host,
- "console_port": console_port,
- "console_type": console_type,
- "sonic_username": sonic_username,
- "sonic_password": sonic_password,
- "console_username": console_username,
- "console_password": console_password,
- "timeout": timeout_s
- }
- return ConsoleTypeMapper[console_type](**params)
-
diff --git a/tests/common/connections/base_console_conn.py b/tests/common/connections/base_console_conn.py
index 37dcc5e32c2..a12b70cf966 100644
--- a/tests/common/connections/base_console_conn.py
+++ b/tests/common/connections/base_console_conn.py
@@ -6,6 +6,14 @@
from netmiko.cisco_base_connection import CiscoBaseConnection
from netmiko.ssh_exception import NetMikoAuthenticationException
+# For interactive shell
+import sys
+import socket
+from paramiko.py3compat import u
+import termios
+import tty
+import select
+
# All supported console types
# Console login via telnet (mad console)
CONSOLE_TELNET = "console_telnet"
@@ -81,3 +89,30 @@ def cleanup(self):
def disconnect(self):
super(BaseConsoleConn, self).disconnect()
+ def posix_shell(self):
+ oldtty = termios.tcgetattr(sys.stdin)
+ try:
+ tty.setraw(sys.stdin.fileno())
+ tty.setcbreak(sys.stdin.fileno())
+ self.remote_conn.settimeout(0.0)
+
+ while True:
+ r, w, e = select.select([self.remote_conn, sys.stdin], [], [])
+ if self.remote_conn in r:
+ try:
+ x = u(self.remote_conn.recv(1024))
+ if len(x) == 0:
+ sys.stdout.write("\r\n*** EOF\r\n")
+ break
+ sys.stdout.write(x)
+ sys.stdout.flush()
+ except socket.timeout:
+ pass
+ if sys.stdin in r:
+ x = sys.stdin.read(1)
+ if len(x) == 0:
+ break
+ self.remote_conn.send(x)
+
+ finally:
+ termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
diff --git a/tests/common/connections/console_host.py b/tests/common/connections/console_host.py
new file mode 100644
index 00000000000..411ade3aa56
--- /dev/null
+++ b/tests/common/connections/console_host.py
@@ -0,0 +1,32 @@
+from base_console_conn import CONSOLE_SSH, CONSOLE_SSH_MENU_PORTS, CONSOLE_TELNET
+from telnet_console_conn import TelnetConsoleConn
+from ssh_console_conn import SSHConsoleConn
+
+ConsoleTypeMapper = {
+ CONSOLE_TELNET: TelnetConsoleConn,
+ CONSOLE_SSH: SSHConsoleConn,
+ CONSOLE_SSH_MENU_PORTS: SSHConsoleConn
+}
+
+def ConsoleHost(console_type,
+ console_host,
+ console_port,
+ sonic_username,
+ sonic_password,
+ console_username=None,
+ console_password=None,
+ timeout_s=100):
+ if not ConsoleTypeMapper.has_key(console_type):
+ raise ValueError("console type {} is not supported yet".format(console_type))
+ params = {
+ "console_host": console_host,
+ "console_port": console_port,
+ "console_type": console_type,
+ "sonic_username": sonic_username,
+ "sonic_password": sonic_password,
+ "console_username": console_username,
+ "console_password": console_password,
+ "timeout": timeout_s
+ }
+ return ConsoleTypeMapper[console_type](**params)
+
diff --git a/tests/common/connections/ssh_console_conn.py b/tests/common/connections/ssh_console_conn.py
index 20b1ac56b17..0bd7b124dcb 100644
--- a/tests/common/connections/ssh_console_conn.py
+++ b/tests/common/connections/ssh_console_conn.py
@@ -1,7 +1,6 @@
-from tests.common.connections.base_console_conn import CONSOLE_SSH
import time
import re
-from base_console_conn import BaseConsoleConn
+from base_console_conn import BaseConsoleConn, CONSOLE_SSH
from netmiko.ssh_exception import NetMikoAuthenticationException
class SSHConsoleConn(BaseConsoleConn):
diff --git a/tests/common/devices/__init__.py b/tests/common/devices/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/common/devices/base.py b/tests/common/devices/base.py
new file mode 100644
index 00000000000..e2faa57e383
--- /dev/null
+++ b/tests/common/devices/base.py
@@ -0,0 +1,91 @@
+import inspect
+import json
+import logging
+
+from multiprocessing.pool import ThreadPool
+
+from tests.common.errors import RunAnsibleModuleFail
+
+logger = logging.getLogger(__name__)
+
+# HACK: This is a hack for issue https://github.com/Azure/sonic-mgmt/issues/1941 and issue
+# https://github.com/ansible/pytest-ansible/issues/47
+# Detailed root cause analysis of the issue: https://github.com/Azure/sonic-mgmt/issues/1941#issuecomment-670434790
+# Before calling callback function of plugins to return ansible module result, ansible calls the
+# ansible.executor.task_result.TaskResult.clean_copy method to remove some keys like 'failed' and 'skipped' in the
+# result dict. The keys to be removed are defined in module variable ansible.executor.task_result._IGNORE. The trick
+# of this hack is to override this pre-defined key list. When the 'failed' key is not included in the list, ansible
+# will not remove it before returning the ansible result to plugins (pytest_ansible in our case)
+try:
+ from ansible.executor import task_result
+ task_result._IGNORE = ('skipped', )
+except Exception as e:
+ logging.error("Hack for https://github.com/ansible/pytest-ansible/issues/47 failed: {}".format(repr(e)))
+
+
+class AnsibleHostBase(object):
+    """
+    @summary: The base class for various objects.
+
+    This class filters an object from the ansible_adhoc fixture by hostname. The object can be considered as an
+    ansible host object although it is not under the hood. Anyway, we can use this object to run ansible module
+    on the host.
+    """
+
+    def __init__(self, ansible_adhoc, hostname, *args, **kwargs):
+        # 'localhost' gets a local ansible connection; any other host is looked
+        # up in the ansible inventory and accessed with become (privilege
+        # escalation) enabled.
+        if hostname == 'localhost':
+            self.host = ansible_adhoc(connection='local', host_pattern=hostname)[hostname]
+        else:
+            self.host = ansible_adhoc(become=True, *args, **kwargs)[hostname]
+        # Management IP comes from the host's 'ansible_host' inventory variable.
+        self.mgmt_ip = self.host.options["inventory_manager"].get_host(hostname).vars["ansible_host"]
+        self.hostname = hostname
+
+    def __getattr__(self, module_name):
+        """Resolve an unknown attribute as an ansible module name.
+
+        If the pytest-ansible host object knows the module, remember it and
+        return self._run so that `host.<module>(...)` executes that module on
+        this host; otherwise raise AttributeError.
+        """
+        if self.host.has_module(module_name):
+            self.module_name = module_name
+            self.module = getattr(self.host, module_name)
+
+            return self._run
+        raise AttributeError(
+            "'%s' object has no attribute '%s'" % (self.__class__, module_name)
+        )
+
+    def _run(self, *module_args, **complex_args):
+        """Run the ansible module captured by __getattr__ and return its result.
+
+        Keyword args consumed here (popped before the module call):
+            verbose (bool, default True): log full args/result vs. a summary.
+            module_ignore_errors (bool, default False): don't raise on failure.
+            module_async (bool, default False): run in a ThreadPool; returns a
+                (pool, AsyncResult) tuple instead of the module result.
+
+        Raises:
+            RunAnsibleModuleFail: if the module failed (or raised an exception)
+                and module_ignore_errors is False.
+        """
+        # The caller's frame is inspected only so log lines point at the test
+        # code that invoked the module, not at this wrapper.
+        previous_frame = inspect.currentframe().f_back
+        filename, line_number, function_name, lines, index = inspect.getframeinfo(previous_frame)
+
+        verbose = complex_args.pop('verbose', True)
+
+        if verbose:
+            logging.debug("{}::{}#{}: [{}] AnsibleModule::{}, args={}, kwargs={}"\
+                .format(filename, function_name, line_number, self.hostname,
+                        self.module_name, json.dumps(module_args), json.dumps(complex_args)))
+        else:
+            logging.debug("{}::{}#{}: [{}] AnsibleModule::{} executing..."\
+                .format(filename, function_name, line_number, self.hostname, self.module_name))
+
+        module_ignore_errors = complex_args.pop('module_ignore_errors', False)
+        module_async = complex_args.pop('module_async', False)
+
+        if module_async:
+            def run_module(module_args, complex_args):
+                return self.module(*module_args, **complex_args)[self.hostname]
+            pool = ThreadPool()
+            result = pool.apply_async(run_module, (module_args, complex_args))
+            return pool, result
+
+        res = self.module(*module_args, **complex_args)[self.hostname]
+
+        if verbose:
+            logging.debug("{}::{}#{}: [{}] AnsibleModule::{} Result => {}"\
+                .format(filename, function_name, line_number, self.hostname, self.module_name, json.dumps(res)))
+        else:
+            logging.debug("{}::{}#{}: [{}] AnsibleModule::{} done, is_failed={}, rc={}"\
+                .format(filename, function_name, line_number, self.hostname, self.module_name, \
+                        res.get('is_failed', None), res.get('rc', None)))
+
+        # 'failed' survives in the result thanks to the task_result._IGNORE
+        # hack at the top of this module.
+        if (res.is_failed or 'exception' in res) and not module_ignore_errors:
+            raise RunAnsibleModuleFail("run module {} failed".format(self.module_name), res)
+
+        return res
diff --git a/tests/common/devices/duthosts.py b/tests/common/devices/duthosts.py
new file mode 100644
index 00000000000..9c077c3f901
--- /dev/null
+++ b/tests/common/devices/duthosts.py
@@ -0,0 +1,117 @@
+import logging
+
+from tests.common.devices.multi_asic import MultiAsicSonicHost
+
+logger = logging.getLogger(__name__)
+
+
+class DutHosts(object):
+ """ Represents all the DUTs (nodes) in a testbed. class has 3 important attributes:
+ nodes: List of all the MultiAsicSonicHost instances for all the SONiC nodes (or cards for chassis) in a multi-dut testbed
+ frontend_nodes: subset of nodes and holds list of MultiAsicSonicHost instances for DUTs with front-panel ports (like linecards in chassis
+ supervisor_nodes: subset of nodes and holds list of MultiAsicSonicHost instances for supervisor cards.
+ """
+ class _Nodes(list):
+ """ Internal class representing a list of MultiAsicSonicHosts """
+ def _run_on_nodes(self, *module_args, **complex_args):
+ """ Delegate the call to each of the nodes, return the results in a dict."""
+ return {node.hostname: getattr(node, self.attr)(*module_args, **complex_args) for node in self}
+
+ def __getattr__(self, attr):
+ """ To support calling ansible modules on a list of MultiAsicSonicHost
+ Args:
+ attr: attribute to get
+
+ Returns:
+ a dictionary with key being the MultiAsicSonicHost's hostname, and value being the output of ansible module
+ on that MultiAsicSonicHost
+ """
+ self.attr = attr
+ return self._run_on_nodes
+
+ def __eq__(self, o):
+ """ To support eq operator on the DUTs (nodes) in the testbed """
+ return list.__eq__(o)
+
+ def __ne__(self, o):
+ """ To support ne operator on the DUTs (nodes) in the testbed """
+ return list.__ne__(o)
+
+ def __hash__(self):
+ """ To support hash operator on the DUTs (nodes) in the testbed """
+ return list.__hash__()
+
+ def __init__(self, ansible_adhoc, tbinfo):
+ """ Initialize a multi-dut testbed with all the DUT's defined in testbed info.
+
+ Args:
+ ansible_adhoc: The pytest-ansible fixture
+ tbinfo - Testbed info whose "duts" holds the hostnames for the DUT's in the multi-dut testbed.
+
+ """
+ # TODO: Initialize the nodes in parallel using multi-threads?
+ self.nodes = self._Nodes([MultiAsicSonicHost(ansible_adhoc, hostname) for hostname in tbinfo["duts"]])
+ self.supervisor_nodes = self._Nodes([node for node in self.nodes if node.is_supervisor_node()])
+ self.frontend_nodes = self._Nodes([node for node in self.nodes if node.is_frontend_node()])
+
+ def __getitem__(self, index):
+ """To support operations like duthosts[0] and duthost['sonic1_hostname']
+
+ Args:
+ index (int or string): Index or hostname of a duthost.
+
+ Raises:
+ KeyError: Raised when duthost with supplied hostname is not found.
+ IndexError: Raised when duthost with supplied index is not found.
+
+ Returns:
+ [MultiAsicSonicHost]: Returns the specified duthost in duthosts. It is an instance of MultiAsicSonicHost.
+ """
+ if type(index) == int:
+ return self.nodes[index]
+ elif type(index) in [ str, unicode ]:
+ for node in self.nodes:
+ if node.hostname == index:
+ return node
+ raise KeyError("No node has hostname '{}'".format(index))
+ else:
+ raise IndexError("Bad index '{}' type {}".format(index, type(index)))
+
+ # Below method are to support treating an instance of DutHosts as a list
+ def __iter__(self):
+ """ To support iteration over all the DUTs (nodes) in the testbed"""
+ return iter(self.nodes)
+
+ def __len__(self):
+ """ To support length of the number of DUTs (nodes) in the testbed """
+ return len(self.nodes)
+
+ def __eq__(self, o):
+ """ To support eq operator on the DUTs (nodes) in the testbed """
+ return self.nodes.__eq__(o)
+
+ def __ne__(self, o):
+ """ To support ne operator on the DUTs (nodes) in the testbed """
+ return self.nodes.__ne__(o)
+
+ def __hash__(self):
+ """ To support hash operator on the DUTs (nodes) in the testbed """
+ return self.nodes.__hash__()
+
+ def __getattr__(self, attr):
+ """To support calling ansible modules directly on all the DUTs (nodes) in the testbed
+ Args:
+ attr: attribute to get
+
+ Returns:
+ a dictionary with key being the MultiAsicSonicHost's hostname, and value being the output of ansible module
+ on that MultiAsicSonicHost
+ """
+ return getattr(self.nodes, attr)
+
+ def config_facts(self, *module_args, **complex_args):
+ result = {}
+ for node in self.nodes:
+ complex_args['host'] = node.hostname
+ result[node.hostname] = node.config_facts(*module_args, **complex_args)['ansible_facts']
+ return result
diff --git a/tests/common/devices/eos.py b/tests/common/devices/eos.py
new file mode 100644
index 00000000000..95cb4b1535e
--- /dev/null
+++ b/tests/common/devices/eos.py
@@ -0,0 +1,171 @@
+import ipaddress
+import json
+import logging
+
+from tests.common.devices.base import AnsibleHostBase
+
+logger = logging.getLogger(__name__)
+
+
+class EosHost(AnsibleHostBase):
+    """
+    @summary: Class for Eos switch
+
+    For running ansible module on the Eos switch
+    """
+
+    def __init__(self, ansible_adhoc, hostname, eos_user, eos_passwd, shell_user=None, shell_passwd=None, gather_facts=False):
+        '''Initialize an object for interacting with EoS type device using ansible modules
+
+        Args:
+            ansible_adhoc (): The pytest-ansible fixture
+            hostname (string): hostname of the EOS device
+            eos_user (string): Username for accessing the EOS CLI interface
+            eos_passwd (string): Password for the eos_user
+            shell_user (string, optional): Username for accessing the Linux shell CLI interface. Defaults to None.
+            shell_passwd (string, optional): Password for the shell_user. Defaults to None.
+            gather_facts (bool, optional): Whether to gather some basic facts. Defaults to False.
+        '''
+        # NOTE(review): gather_facts is accepted but not used anywhere in this
+        # class — confirm whether callers depend on it before removing.
+        self.eos_user = eos_user
+        self.eos_passwd = eos_passwd
+        self.shell_user = shell_user
+        self.shell_passwd = shell_passwd
+        AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
+        # Separate localhost handle used by exec_template to shell out to
+        # ansible-playbook.
+        self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
+
+    def __getattr__(self, module_name):
+        """Route module calls to the EOS CLI or the Linux shell.
+
+        'eos_*' modules get network_cli credentials; anything else gets plain
+        ssh credentials (requires shell_user/shell_passwd). The chosen
+        credentials are injected as ansible extra vars before delegating to
+        AnsibleHostBase.__getattr__.
+        """
+        if module_name.startswith('eos_'):
+            evars = {
+                'ansible_connection':'network_cli',
+                'ansible_network_os':'eos',
+                'ansible_user': self.eos_user,
+                'ansible_password': self.eos_passwd,
+                'ansible_ssh_user': self.eos_user,
+                'ansible_ssh_pass': self.eos_passwd,
+                'ansible_become_method': 'enable'
+            }
+        else:
+            if not self.shell_user or not self.shell_passwd:
+                raise Exception("Please specify shell_user and shell_passwd for {}".format(self.hostname))
+            evars = {
+                'ansible_connection':'ssh',
+                'ansible_network_os':'linux',
+                'ansible_user': self.shell_user,
+                'ansible_password': self.shell_passwd,
+                'ansible_ssh_user': self.shell_user,
+                'ansible_ssh_pass': self.shell_passwd,
+                'ansible_become_method': 'sudo'
+            }
+        self.host.options['variable_manager'].extra_vars.update(evars)
+        return super(EosHost, self).__getattr__(module_name)
+
+    def shutdown(self, interface_name):
+        """Administratively shut down the given interface."""
+        out = self.eos_config(
+            lines=['shutdown'],
+            parents='interface %s' % interface_name)
+        logging.info('Shut interface [%s]' % interface_name)
+        return out
+
+    def no_shutdown(self, interface_name):
+        """Administratively bring up the given interface."""
+        out = self.eos_config(
+            lines=['no shutdown'],
+            parents='interface %s' % interface_name)
+        logging.info('No shut interface [%s]' % interface_name)
+        return out
+
+    def check_intf_link_state(self, interface_name):
+        """Return True if 'show interface' reports the link as Up."""
+        show_int_result = self.eos_command(
+            commands=['show interface %s' % interface_name])
+        return 'Up' in show_int_result['stdout_lines'][0]
+
+    def set_interface_lacp_rate_mode(self, interface_name, mode):
+        """Set the LACP rate on an interface, falling back from the deprecated
+        'lacp rate' command to 'lacp timer' on newer EOS releases.
+
+        Raises:
+            Exception: if neither command variant changed the configuration.
+        """
+        out = self.eos_config(
+            lines=['lacp rate %s' % mode],
+            parents='interface %s' % interface_name)
+
+        # NOTE(review): assumes the 'failed' key is always present in the
+        # eos_config result — confirm against the ansible module's output.
+        if out['failed'] == True:
+            # new eos deprecate lacp rate and use lacp timer command
+            out = self.eos_config(
+                lines=['lacp timer %s' % mode],
+                parents='interface %s' % interface_name)
+            if out['changed'] == False:
+                logging.warning("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
+                raise Exception("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
+            else:
+                logging.info("Set interface [%s] lacp timer to [%s]" % (interface_name, mode))
+        else:
+            logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode))
+        return out
+
+    def kill_bgpd(self):
+        """Stop BGP by shutting down the Rib agent."""
+        out = self.eos_config(lines=['agent Rib shutdown'])
+        return out
+
+    def start_bgpd(self):
+        """Restart BGP by re-enabling the Rib agent."""
+        out = self.eos_config(lines=['no agent Rib shutdown'])
+        return out
+
+    def check_bgp_session_state(self, neigh_ips, neigh_desc, state="established"):
+        """
+        @summary: check if current bgp session equals to the target state
+
+        @param neigh_ips: bgp neighbor IPs
+        @param neigh_desc: bgp neighbor description
+        @param state: target state
+        """
+        neigh_ips = [ip.lower() for ip in neigh_ips]
+        neigh_ips_ok = []
+        neigh_desc_ok = []
+        neigh_desc_available = False
+
+        out_v4 = self.eos_command(
+            commands=['show ip bgp summary | json'])
+        logging.info("ip bgp summary: {}".format(out_v4))
+
+        out_v6 = self.eos_command(
+            commands=['show ipv6 bgp summary | json'])
+        logging.info("ipv6 bgp summary: {}".format(out_v6))
+
+        # Collect v4 peers whose state matches; track descriptions only when
+        # the EOS output actually carries a 'description' field.
+        for k, v in out_v4['stdout'][0]['vrfs']['default']['peers'].items():
+            if v['peerState'].lower() == state.lower():
+                if k in neigh_ips:
+                    neigh_ips_ok.append(k)
+                if 'description' in v:
+                    neigh_desc_available = True
+                    if v['description'] in neigh_desc:
+                        neigh_desc_ok.append(v['description'])
+
+        # Same for v6 peers; the peer key is lower-cased for comparison.
+        for k, v in out_v6['stdout'][0]['vrfs']['default']['peers'].items():
+            if v['peerState'].lower() == state.lower():
+                if k.lower() in neigh_ips:
+                    neigh_ips_ok.append(k)
+                if 'description' in v:
+                    neigh_desc_available = True
+                    if v['description'] in neigh_desc:
+                        neigh_desc_ok.append(v['description'])
+        logging.info("neigh_ips_ok={} neigh_desc_available={} neigh_desc_ok={}"\
+            .format(str(neigh_ips_ok), str(neigh_desc_available), str(neigh_desc_ok)))
+        # Descriptions are only required to match when at least one peer
+        # reported a description.
+        if neigh_desc_available:
+            if len(neigh_ips) == len(neigh_ips_ok) and len(neigh_desc) == len(neigh_desc_ok):
+                return True
+        else:
+            if len(neigh_ips) == len(neigh_ips_ok):
+                return True
+
+        return False
+
+    def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
+        """Run an ansible playbook against this host from localhost.
+
+        kwargs are passed to the playbook as --extra-vars (JSON-encoded).
+        Raises Exception if ansible-playbook exits non-zero.
+        """
+        playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvvvv'
+        cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory,
+                                           fanout_host=self.hostname, extra_vars=json.dumps(kwargs))
+        res = self.localhost.shell(cli_cmd)
+
+        if res["localhost"]["rc"] != 0:
+            raise Exception("Unable to execute template\n{}".format(res["stdout"]))
+
+    def get_route(self, prefix):
+        """Return the JSON BGP route entry for a v4 or v6 prefix.
+
+        NOTE(review): 'unicode' is Python-2-only; needs updating for Python 3.
+        """
+        cmd = 'show ip bgp' if ipaddress.ip_network(unicode(prefix)).version == 4 else 'show ipv6 bgp'
+        return self.eos_command(commands=[{
+            'command': '{} {}'.format(cmd, prefix),
+            'output': 'json'
+        }])['stdout'][0]
diff --git a/tests/common/devices/fanout.py b/tests/common/devices/fanout.py
new file mode 100644
index 00000000000..6bb58ffc7e7
--- /dev/null
+++ b/tests/common/devices/fanout.py
@@ -0,0 +1,77 @@
+
+import logging
+
+from tests.common.devices.sonic import SonicHost
+from tests.common.devices.onyx import OnyxHost
+from tests.common.devices.ixia import IxiaHost
+from tests.common.devices.eos import EosHost
+
+logger = logging.getLogger(__name__)
+
+
+class FanoutHost(object):
+    """
+    @summary: Class for Fanout switch
+
+    For running ansible module on the Fanout switch
+    """
+
+    def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd, shell_user=None, shell_passwd=None):
+        # Maps DUT port <-> fanout port; populated later via add_port_map().
+        self.hostname = hostname
+        self.type = device_type
+        self.host_to_fanout_port_map = {}
+        self.fanout_to_host_port_map = {}
+        # Pick the concrete host implementation by OS; anything unrecognized
+        # is treated as an Arista EOS device.
+        if os == 'sonic':
+            self.os = os
+            self.host = SonicHost(ansible_adhoc, hostname,
+                                  shell_user=shell_user,
+                                  shell_passwd=shell_passwd)
+        elif os == 'onyx':
+            self.os = os
+            self.host = OnyxHost(ansible_adhoc, hostname, user, passwd)
+        elif os == 'ixia':
+            # TODO: add ixia chassis abstraction
+            self.os = os
+            self.host = IxiaHost(ansible_adhoc, os, hostname, device_type)
+        else:
+            # Use eos host if the os type is unknown
+            self.os = 'eos'
+            self.host = EosHost(ansible_adhoc, hostname, user, passwd, shell_user=shell_user, shell_passwd=shell_passwd)
+
+    def __getattr__(self, module_name):
+        # Delegate everything not defined here to the concrete host object.
+        return getattr(self.host, module_name)
+
+    def get_fanout_os(self):
+        """Return the fanout's OS string ('sonic', 'onyx', 'ixia' or 'eos')."""
+        return self.os
+
+    def get_fanout_type(self):
+        """Return the device type string this fanout was created with."""
+        return self.type
+
+    def shutdown(self, interface_name):
+        """Administratively shut down interface_name on the fanout."""
+        return self.host.shutdown(interface_name)
+
+    def no_shutdown(self, interface_name):
+        """Administratively bring up interface_name on the fanout."""
+        return self.host.no_shutdown(interface_name)
+
+    def __str__(self):
+        return "{ os: '%s', hostname: '%s', device_type: '%s' }" % (self.os, self.hostname, self.type)
+
+    def __repr__(self):
+        return self.__str__()
+
+    def add_port_map(self, host_port, fanout_port):
+        """
+        Fanout switch is build from the connection graph of the
+        DUT. So each fanout switch instance is relevant to the
+        DUT instance in the test. As result the port mapping is
+        unique from the DUT perspective. However, this function
+        need update when supporting multiple DUT
+
+        host_port is a encoded string of |,
+        e.g. sample_host|Ethernet0.
+        """
+        self.host_to_fanout_port_map[host_port] = fanout_port
+        self.fanout_to_host_port_map[fanout_port] = host_port
+
+    def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
+        """Run an ansible playbook against the fanout via the concrete host."""
+        return self.host.exec_template(ansible_root, ansible_playbook, inventory, **kwargs)
diff --git a/tests/common/devices/ixia.py b/tests/common/devices/ixia.py
new file mode 100644
index 00000000000..0217d5ce33e
--- /dev/null
+++ b/tests/common/devices/ixia.py
@@ -0,0 +1,53 @@
+import logging
+
+from tests.common.devices.base import AnsibleHostBase
+
+logger = logging.getLogger(__name__)
+
+
+class IxiaHost (AnsibleHostBase):
+ """ This class is a place-holder for running ansible module on Ixia
+ fanout devices in future (TBD).
+ """
+ def __init__ (self, ansible_adhoc, os, hostname, device_type) :
+ """ Initializing Ixia fanout host for using ansible modules.
+
+ Note: Right now, it is just a place holder.
+
+ Args:
+ ansible_adhoc :The pytest-ansible fixture
+ os (str): The os type of Ixia Fanout.
+ hostname (str): The Ixia fanout host-name
+ device_type (str): The Ixia fanout device type.
+ """
+
+ self.ansible_adhoc = ansible_adhoc
+ self.os = os
+ self.hostname = hostname
+ self.device_type = device_type
+ super().__init__(IxiaHost, self)
+
+ def get_host_name (self):
+ """Returns the Ixia hostname
+
+ Args:
+ This function takes no argument.
+ """
+ return self.hostname
+
+ def get_os (self) :
+ """Returns the os type of the ixia device.
+
+ Args:
+ This function takes no argument.
+ """
+ return self.os
+
+ def execute (self, cmd) :
+ """Execute a given command on ixia fanout host.
+
+ Args:
+ cmd (str): Command to be executed.
+ """
+ if (self.os == 'ixia') :
+ eval(cmd)
diff --git a/tests/common/devices/k8s.py b/tests/common/devices/k8s.py
new file mode 100644
index 00000000000..8871bf23314
--- /dev/null
+++ b/tests/common/devices/k8s.py
@@ -0,0 +1,149 @@
+import logging
+import time
+
+from tests.common.devices.base import AnsibleHostBase
+
+logger = logging.getLogger(__name__)
+
+
+class K8sMasterHost(AnsibleHostBase):
+ """
+ @summary: Class for Ubuntu KVM that hosts Kubernetes master
+
+ For running ansible module on the K8s Ubuntu KVM host
+ """
+
+ def __init__(self, ansible_adhoc, hostname, is_haproxy):
+ """ Initialize an object for interacting with Ubuntu KVM using ansible modules
+
+ Args:
+ ansible_adhoc (): The pytest-ansible fixture
+ hostname (string): hostname of the Ubuntu KVM
+ is_haproxy (boolean): True if node is haproxy load balancer, False if node is backend master server
+
+ """
+ self.hostname = hostname
+ self.is_haproxy = is_haproxy
+ super(K8sMasterHost, self).__init__(ansible_adhoc, hostname)
+ evars = {
+ 'ansible_become_method': 'enable'
+ }
+ self.host.options['variable_manager'].extra_vars.update(evars)
+
+ def check_k8s_master_ready(self):
+ """
+ @summary: check if all Kubernetes master node statuses reflect target state "Ready"
+
+ """
+ k8s_nodes_statuses = self.shell('kubectl get nodes | grep master', module_ignore_errors=True)["stdout_lines"]
+ logging.info("k8s master node statuses: {}".format(k8s_nodes_statuses))
+
+ for line in k8s_nodes_statuses:
+ if "NotReady" in line:
+ return False
+ return True
+
+ def shutdown_api_server(self):
+ """
+ @summary: Shuts down API server container on one K8sMasterHost server
+
+ """
+ self.shell('sudo systemctl stop kubelet')
+ logging.info("Shutting down API server on backend master server hostname: {}".format(self.hostname))
+ api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
+ for id in api_server_container_ids:
+ self.shell('sudo docker kill {}'.format(id))
+ api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout"]
+ assert not api_server_container_ids
+
+ def start_api_server(self):
+ """
+ @summary: Starts API server container on one K8sMasterHost server
+
+ """
+ self.shell('sudo systemctl start kubelet')
+ logging.info("Starting API server on backend master server hostname: {}".format(self.hostname))
+ timeout_wait_secs = 60
+ poll_wait_secs = 5
+ api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
+ while ((len(api_server_container_ids) < 2) and (timeout_wait_secs > 0)):
+ logging.info("Waiting for Kubernetes API server to start")
+ time.sleep(poll_wait_secs)
+ timeout_wait_secs -= poll_wait_secs
+ api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
+ assert len(api_server_container_ids) > 1
+
+ def ensure_kubelet_running(self):
+ """
+ @summary: Ensures kubelet is running on one K8sMasterHost server
+
+ """
+ logging.info("Ensuring kubelet is started on {}".format(self.hostname))
+ kubelet_status = self.shell("sudo systemctl status kubelet | grep 'Active: '", module_ignore_errors=True)
+ for line in kubelet_status["stdout_lines"]:
+ if not "running" in line:
+ self.shell("sudo systemctl start kubelet")
+
+
+class K8sMasterCluster(object):
+ """
+ @summary: Class that encapsulates Kubernetes master cluster
+
+ For operating on a group of K8sMasterHost objects that compose one HA Kubernetes master cluster
+ """
+
+ def __init__(self, k8smasters):
+ """Initialize a list of backend master servers, and identify the HAProxy load balancer node
+
+ Args:
+ k8smasters: fixture that allows retrieval of K8sMasterHost objects
+
+ """
+ self.backend_masters = []
+ for hostname, k8smaster in k8smasters.items():
+ if k8smaster['host'].is_haproxy:
+ self.haproxy = k8smaster['host']
+ else:
+ self.backend_masters.append(k8smaster)
+
+ @property
+ def vip(self):
+ """
+ @summary: Retrieve VIP of Kubernetes master cluster
+
+ """
+ return self.haproxy.mgmt_ip
+
+ def shutdown_all_api_server(self):
+ """
+ @summary: shut down API server on all backend master servers
+
+ """
+ for k8smaster in self.backend_masters:
+ logger.info("Shutting down API Server on master node {}".format(k8smaster['host'].hostname))
+ k8smaster['host'].shutdown_api_server()
+
+ def start_all_api_server(self):
+ """
+ @summary: Start API server on all backend master servers
+
+ """
+ for k8smaster in self.backend_masters:
+ logger.info("Starting API server on master node {}".format(k8smaster['host'].hostname))
+ k8smaster['host'].start_api_server()
+
+ def check_k8s_masters_ready(self):
+ """
+ @summary: Ensure that Kubernetes master is in healthy state
+
+ """
+ for k8smaster in self.backend_masters:
+ assert k8smaster['host'].check_k8s_master_ready()
+
+ def ensure_all_kubelet_running(self):
+ """
+ @summary: Ensures kubelet is started on all backend masters, start kubelet if necessary
+
+ """
+ for k8smaster in self.backend_masters:
+ k8smaster['host'].ensure_kubelet_running()
diff --git a/tests/common/devices/local.py b/tests/common/devices/local.py
new file mode 100644
index 00000000000..917e82d643c
--- /dev/null
+++ b/tests/common/devices/local.py
@@ -0,0 +1,11 @@
+from tests.common.devices.base import AnsibleHostBase
+
+
+class Localhost(AnsibleHostBase):
+ """
+ @summary: Class for localhost
+
+ For running ansible module on localhost
+ """
+ def __init__(self, ansible_adhoc):
+ AnsibleHostBase.__init__(self, ansible_adhoc, "localhost")
diff --git a/tests/common/devices/multi_asic.py b/tests/common/devices/multi_asic.py
new file mode 100644
index 00000000000..a18bf9c104a
--- /dev/null
+++ b/tests/common/devices/multi_asic.py
@@ -0,0 +1,258 @@
+import copy
+import ipaddress
+import json
+import logging
+
+from tests.common.devices.sonic import SonicHost
+from tests.common.devices.sonic_asic import SonicAsic
+from tests.common.helpers.constants import DEFAULT_ASIC_ID, DEFAULT_NAMESPACE
+
+logger = logging.getLogger(__name__)
+
+
+class MultiAsicSonicHost(object):
+    """ This class represents a Multi-asic SonicHost It has two attributes:
+        sonic_host: a SonicHost instance. This object is for interacting with the SONiC host through pytest_ansible.
+        asics: a list of SonicAsic instances.
+
+        The 'duthost' fixture will return an instance of a MultiAsicSonicHost.
+        So, even a single asic pizza box is represented as a MultiAsicSonicHost with 1 SonicAsic.
+    """
+
+    # Services that run once per host (not replicated per asic).
+    _DEFAULT_SERVICES = ["pmon", "snmp", "lldp", "database"]
+
+    def __init__(self, ansible_adhoc, hostname):
+        """ Initializing a MultiAsicSonicHost.
+
+        Args:
+            ansible_adhoc : The pytest-ansible fixture
+            hostname: Name of the host in the ansible inventory
+        """
+        self.sonichost = SonicHost(ansible_adhoc, hostname)
+        self.asics = [SonicAsic(self.sonichost, asic_index) for asic_index in range(self.sonichost.facts["num_asic"])]
+
+        # Get the frontend and backend asics in a multiAsic device.
+        self.frontend_asics = []
+        self.backend_asics = []
+        if self.sonichost.is_multi_asic:
+            for asic in self.asics:
+                if asic.is_it_frontend():
+                    self.frontend_asics.append(asic)
+                elif asic.is_it_backend():
+                    self.backend_asics.append(asic)
+
+        self.critical_services_tracking_list()
+
+    def __repr__(self):
+        # self.hostname resolves via __getattr__ delegation to sonichost.
+        return ' {}'.format(self.hostname)
+
+    def critical_services_tracking_list(self):
+        """Get the list of services running on the DUT
+           The services on the sonic devices are:
+              - services running on the host
+              - services which are replicated per asic
+            Returns:
+            [list]: list of the services running the device
+        """
+        service_list = []
+        service_list+= self._DEFAULT_SERVICES
+        for asic in self.asics:
+            service_list += asic.get_critical_services()
+        self.sonichost.reset_critical_services_tracking_list(service_list)
+
+    def get_default_critical_services_list(self):
+        """Return the per-host (non-asic-replicated) service names."""
+        return self._DEFAULT_SERVICES
+
+    def _run_on_asics(self, *module_args, **complex_args):
+        """ Run an asible module on asics based on 'asic_index' keyword in complex_args
+
+        Args:
+            module_args: other ansible module args passed from the caller
+            complex_args: other ansible keyword args
+
+        Raises:
+            ValueError: if asic_index is specified and it is neither an int or string 'all'.
+            ValueError: if asic_index is specified and is an int, but greater than number of asics in the SonicHost
+
+        Returns:
+            if asic_index is not specified, then we return the output of the ansible module on global namespace (using SonicHost)
+            else
+                if asic_index is an int, the output of the ansible module on that asic namespace
+                    - for single asic SonicHost this would still be the same as the ansible module on the global namespace
+                else if asic_index is string 'all', then a list of ansible module output for all the asics on the SonicHost
+                    - for single asic, this would be a list of size 1.
+        """
+        if "asic_index" not in complex_args:
+            # Default ASIC/namespace
+            return getattr(self.sonichost, self.multi_asic_attr)(*module_args, **complex_args)
+        else:
+            asic_complex_args = copy.deepcopy(complex_args)
+            asic_index = asic_complex_args.pop("asic_index")
+            if type(asic_index) == int:
+                # Specific ASIC/namespace
+                if self.sonichost.facts['num_asic'] == 1:
+                    if asic_index != 0:
+                        raise ValueError("Trying to run module '{}' against asic_index '{}' on a single asic dut '{}'".format(self.multi_asic_attr, asic_index, self.sonichost.hostname))
+                return getattr(self.asics[asic_index], self.multi_asic_attr)(*module_args, **asic_complex_args)
+            elif type(asic_index) == str and asic_index.lower() == "all":
+                # All ASICs/namespace
+                return [getattr(asic, self.multi_asic_attr)(*module_args, **asic_complex_args) for asic in self.asics]
+            else:
+                raise ValueError("Argument 'asic_index' must be an int or string 'all'.")
+
+    def get_frontend_asic_ids(self):
+        """Frontend asic indices; [DEFAULT_ASIC_ID] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_ASIC_ID]
+
+        return [asic.asic_index for asic in self.frontend_asics]
+
+    def get_frontend_asic_namespace_list(self):
+        """Frontend asic namespaces; [DEFAULT_NAMESPACE] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_NAMESPACE]
+
+        return [asic.namespace for asic in self.frontend_asics]
+
+    def get_backend_asic_ids(self):
+        """Backend asic indices; [DEFAULT_ASIC_ID] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_ASIC_ID]
+
+        return [asic.asic_index for asic in self.backend_asics]
+
+    def get_backend_asic_namespace_list(self):
+        """Backend asic namespaces; [DEFAULT_NAMESPACE] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_NAMESPACE]
+
+        return [asic.namespace for asic in self.backend_asics]
+
+    def asic_instance(self, asic_index):
+        """Return the SonicAsic at asic_index (the first asic when None)."""
+        if asic_index is None:
+            return self.asics[0]
+        return self.asics[asic_index]
+
+    def asic_instance_from_namespace(self, namespace=DEFAULT_NAMESPACE):
+        """Return the SonicAsic owning 'namespace'; None if not found.
+        An empty/default namespace maps to the first asic."""
+        if not namespace:
+            return self.asics[0]
+
+        for asic in self.asics:
+            if asic.namespace == namespace:
+                return asic
+        return None
+
+    def get_asic_ids(self):
+        """All asic indices; [DEFAULT_ASIC_ID] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_ASIC_ID]
+
+        return [asic.asic_index for asic in self.asics]
+
+    def get_asic_namespace_list(self):
+        """All asic namespaces; [DEFAULT_NAMESPACE] on single-asic devices."""
+        if self.sonichost.facts['num_asic'] == 1:
+            return [DEFAULT_NAMESPACE]
+
+        return [asic.namespace for asic in self.asics]
+
+    def get_asic_id_from_namespace(self, namespace):
+        """Map a namespace to its asic index.
+
+        Raises ValueError for a namespace that belongs to no asic."""
+        if self.sonichost.facts['num_asic'] == 1 or namespace == DEFAULT_NAMESPACE:
+            return DEFAULT_ASIC_ID
+
+        for asic in self.asics:
+            if namespace == asic.namespace:
+                return asic.asic_index
+
+        # Raise an error if we reach here
+        raise ValueError("Invalid namespace '{}' passed as input".format(namespace))
+
+    def get_namespace_from_asic_id(self, asic_id):
+        """Map an asic index to its namespace.
+
+        Raises ValueError for an asic_id that belongs to no asic."""
+        if self.sonichost.facts['num_asic'] == 1 or asic_id == DEFAULT_ASIC_ID:
+            return DEFAULT_NAMESPACE
+
+        for asic in self.asics:
+            if asic_id == asic.asic_index:
+                return asic.namespace
+
+        # Raise an error if we reach here
+        raise ValueError("Invalid asic_id '{}' passed as input".format(asic_id))
+
+    def get_vtysh_cmd_for_namespace(self, cmd, namespace):
+        """Rewrite a 'vtysh ...' command to target the given namespace's asic."""
+        asic_id = self.get_asic_id_from_namespace(namespace)
+        if asic_id == DEFAULT_ASIC_ID:
+            return cmd
+        # Inserts '-n <asic_id>' after the first 'vtysh' occurrence.
+        ns_cmd = cmd.replace('vtysh', 'vtysh -n {}'.format(asic_id))
+        return ns_cmd
+
+    def get_linux_ip_cmd_for_namespace(self, cmd, namespace):
+        """Rewrite an 'ip ...' command to run inside the given namespace."""
+        if not namespace:
+            return cmd
+        # NOTE(review): this replaces the first 'ip' substring in cmd —
+        # confirm callers only pass commands that start with 'ip'.
+        ns_cmd = cmd.replace('ip', 'ip -n {}'.format(namespace))
+        return ns_cmd
+
+    def get_route(self, prefix, namespace=DEFAULT_NAMESPACE):
+        """Return the vtysh JSON BGP route entry for a v4 or v6 prefix.
+
+        NOTE(review): 'unicode' is Python-2-only; needs updating for Python 3.
+        """
+        asic_id = self.get_asic_id_from_namespace(namespace)
+        if asic_id == DEFAULT_ASIC_ID:
+            ns_prefix = ''
+        else:
+            ns_prefix = '-n ' + str(asic_id)
+        cmd = 'show bgp ipv4' if ipaddress.ip_network(unicode(prefix)).version == 4 else 'show bgp ipv6'
+        return json.loads(self.shell('vtysh {} -c "{} {} json"'.format(ns_prefix, cmd, prefix))['stdout'])
+
+    def __getattr__(self, attr):
+        """ To support calling an ansible module on a MultiAsicSonicHost.
+
+        Args:
+            attr: attribute to get
+
+        Returns:
+            if attr doesn't start with '_' and is a method of SonicAsic, attr will be ansible module that has dependency on ASIC,
+                return the output of the ansible module on asics requested - using _run_on_asics method.
+            else
+                return the attribute from SonicHost.
+        """
+        sonic_asic_attr = getattr(SonicAsic, attr, None)
+        if not attr.startswith("_") and sonic_asic_attr and callable(sonic_asic_attr):
+            self.multi_asic_attr = attr
+            return self._run_on_asics
+        else:
+            return getattr(self.sonichost, attr)  # For backward compatibility
+
+    def get_asic_or_sonic_host(self, asic_id):
+        """Return the SonicHost for DEFAULT_ASIC_ID, else the SonicAsic at asic_id."""
+        if asic_id == DEFAULT_ASIC_ID:
+            return self.sonichost
+        return self.asics[asic_id]
+
+    def stop_service(self, service):
+        """Stop a per-host service on the host, or a replicated service on every asic."""
+        if service in self._DEFAULT_SERVICES:
+            return self.sonichost.stop_service(service, service)
+
+        for asic in self.asics:
+            asic.stop_service(service)
+
+    def delete_container(self, service):
+        """Delete a per-host container, or the per-asic containers of a replicated service."""
+        if service in self._DEFAULT_SERVICES:
+            return self.sonichost.delete_container(service)
+
+        for asic in self.asics:
+            asic.delete_container(service)
+
+    def is_container_running(self, service):
+        """True if the container runs on the host, or on any asic for replicated services."""
+        if service in self._DEFAULT_SERVICES:
+            return self.sonichost.is_container_running(service)
+
+        for asic in self.asics:
+            if asic.is_container_running(service):
+                return True
+
+        return False
+
+    def is_bgp_state_idle(self):
+        """Delegate BGP idle-state check to the SonicHost."""
+        return self.sonichost.is_bgp_state_idle()
+
+    def is_service_running(self, service_name, docker_name=None):
+        """True if the service runs on the host (per-host services), or on
+        every asic (replicated services)."""
+        docker_name = service_name if docker_name is None else docker_name
+
+        if docker_name in self._DEFAULT_SERVICES:
+            return self.sonichost.is_service_running(service_name, docker_name)
+
+        for asic in self.asics:
+            if not asic.is_service_running(service_name, docker_name):
+                return False
+
+        return True
diff --git a/tests/common/devices/onyx.py b/tests/common/devices/onyx.py
new file mode 100644
index 00000000000..4534b8af187
--- /dev/null
+++ b/tests/common/devices/onyx.py
@@ -0,0 +1,70 @@
+import json
+import logging
+
+from tests.common.devices.base import AnsibleHostBase
+
+logger = logging.getLogger(__name__)
+
+
+class OnyxHost(AnsibleHostBase):
+ """
+ @summary: Class for ONYX switch
+
+ For running ansible module on the ONYX switch
+ """
+
+ def __init__(self, ansible_adhoc, hostname, user, passwd, gather_facts=False):
+ AnsibleHostBase.__init__(self, ansible_adhoc, hostname, connection="network_cli")
+ evars = {'ansible_connection':'network_cli',
+ 'ansible_network_os':'onyx',
+ 'ansible_user': user,
+ 'ansible_password': passwd,
+ 'ansible_ssh_user': user,
+ 'ansible_ssh_pass': passwd,
+ 'ansible_become_method': 'enable'
+ }
+
+ self.host.options['variable_manager'].extra_vars.update(evars)
+ self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
+
+ def shutdown(self, interface_name):
+ out = self.host.onyx_config(
+ lines=['shutdown'],
+ parents='interface %s' % interface_name)
+ logging.info('Shut interface [%s]' % interface_name)
+ return out
+
+ def no_shutdown(self, interface_name):
+ out = self.host.onyx_config(
+ lines=['no shutdown'],
+ parents='interface %s' % interface_name)
+ logging.info('No shut interface [%s]' % interface_name)
+ return out
+
+ def check_intf_link_state(self, interface_name):
+ show_int_result = self.host.onyx_command(
+ commands=['show interfaces ethernet {} | include "Operational state"'.format(interface_name)])[self.hostname]
+ return 'Up' in show_int_result['stdout'][0]
+
+ def command(self, cmd):
+ out = self.host.onyx_command(commands=[cmd])
+ return out
+
+ def set_interface_lacp_rate_mode(self, interface_name, mode):
+ out = self.host.onyx_config(
+ lines=['lacp rate %s' % mode],
+ parents='interface ethernet %s' % interface_name)
+ logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode))
+ return out
+
+ def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
+ """
+ Execute ansible playbook with specified parameters
+ """
+ playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvvvv'
+ cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory,
+ fanout_host=self.hostname, extra_vars=json.dumps(kwargs))
+ res = self.localhost.shell(cli_cmd)
+
+ if res["localhost"]["rc"] != 0:
+ raise Exception("Unable to execute template\n{}".format(res["localhost"]["stdout"]))
diff --git a/tests/common/devices/ptf.py b/tests/common/devices/ptf.py
new file mode 100644
index 00000000000..2f5d46317c8
--- /dev/null
+++ b/tests/common/devices/ptf.py
@@ -0,0 +1,13 @@
+from tests.common.devices.base import AnsibleHostBase
+
+
+class PTFHost(AnsibleHostBase):
+ """
+ @summary: Class for PTF
+
+ Instance of this class can run ansible modules on the PTF host.
+ """
+ def __init__(self, ansible_adhoc, hostname):
+ AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
+
+ # TODO: Add a method for running PTF script
diff --git a/tests/common/devices.py b/tests/common/devices/sonic.py
similarity index 51%
rename from tests/common/devices.py
rename to tests/common/devices/sonic.py
index 1f70844bbb0..bac89738b68 100644
--- a/tests/common/devices.py
+++ b/tests/common/devices/sonic.py
@@ -1,126 +1,24 @@
-"""
-Classes for various devices that may be used in testing.
-There are other options for interacting with the devices used in testing, for example netmiko, fabric.
-We have a big number of customized ansible modules in the sonic-mgmt/ansible/library folder. To reused these
-modules, we have no other choice, at least for interacting with SONiC, localhost and PTF.
-
-We can consider using netmiko for interacting with the VMs used in testing.
-"""
+import ipaddress
import json
import logging
import os
import re
-import inspect
-import ipaddress
-import copy
-import time
-from multiprocessing.pool import ThreadPool
-from datetime import datetime
+
from collections import defaultdict
+from datetime import datetime
from ansible import constants
from ansible.plugins.loader import connection_loader
-from errors import RunAnsibleModuleFail
-from errors import UnsupportedAnsibleModule
-from tests.common.cache import cached
-from tests.common.helpers.constants import DEFAULT_ASIC_ID, DEFAULT_NAMESPACE, NAMESPACE_PREFIX
+from tests.common.devices.base import AnsibleHostBase
from tests.common.helpers.dut_utils import is_supervisor_node
-
-# HACK: This is a hack for issue https://github.com/Azure/sonic-mgmt/issues/1941 and issue
-# https://github.com/ansible/pytest-ansible/issues/47
-# Detailed root cause analysis of the issue: https://github.com/Azure/sonic-mgmt/issues/1941#issuecomment-670434790
-# Before calling callback function of plugins to return ansible module result, ansible calls the
-# ansible.executor.task_result.TaskResult.clean_copy method to remove some keys like 'failed' and 'skipped' in the
-# result dict. The keys to be removed are defined in module variable ansible.executor.task_result._IGNORE. The trick
-# of this hack is to override this pre-defined key list. When the 'failed' key is not included in the list, ansible
-# will not remove it before returning the ansible result to plugins (pytest_ansible in our case)
-try:
- from ansible.executor import task_result
- task_result._IGNORE = ('skipped', )
-except Exception as e:
- logging.error("Hack for https://github.com/ansible/pytest-ansible/issues/47 failed: {}".format(repr(e)))
+from tests.common.cache import cached
+from tests.common.helpers.constants import DEFAULT_ASIC_ID
+from tests.common.errors import RunAnsibleModuleFail
logger = logging.getLogger(__name__)
-class AnsibleHostBase(object):
- """
- @summary: The base class for various objects.
-
- This class filters an object from the ansible_adhoc fixture by hostname. The object can be considered as an
- ansible host object although it is not under the hood. Anyway, we can use this object to run ansible module
- on the host.
- """
-
- def __init__(self, ansible_adhoc, hostname, *args, **kwargs):
- if hostname == 'localhost':
- self.host = ansible_adhoc(connection='local', host_pattern=hostname)[hostname]
- else:
- self.host = ansible_adhoc(become=True, *args, **kwargs)[hostname]
- self.mgmt_ip = self.host.options["inventory_manager"].get_host(hostname).vars["ansible_host"]
- self.hostname = hostname
-
- def __getattr__(self, module_name):
- if self.host.has_module(module_name):
- self.module_name = module_name
- self.module = getattr(self.host, module_name)
-
- return self._run
- raise AttributeError(
- "'%s' object has no attribute '%s'" % (self.__class__, module_name)
- )
-
- def _run(self, *module_args, **complex_args):
-
- previous_frame = inspect.currentframe().f_back
- filename, line_number, function_name, lines, index = inspect.getframeinfo(previous_frame)
-
- logging.debug("{}::{}#{}: [{}] AnsibleModule::{}, args={}, kwargs={}"\
- .format(filename, function_name, line_number, self.hostname,
- self.module_name, json.dumps(module_args), json.dumps(complex_args)))
-
- module_ignore_errors = complex_args.pop('module_ignore_errors', False)
- module_async = complex_args.pop('module_async', False)
-
- if module_async:
- def run_module(module_args, complex_args):
- return self.module(*module_args, **complex_args)[self.hostname]
- pool = ThreadPool()
- result = pool.apply_async(run_module, (module_args, complex_args))
- return pool, result
-
- res = self.module(*module_args, **complex_args)[self.hostname]
- logging.debug("{}::{}#{}: [{}] AnsibleModule::{} Result => {}"\
- .format(filename, function_name, line_number, self.hostname, self.module_name, json.dumps(res)))
-
- if (res.is_failed or 'exception' in res) and not module_ignore_errors:
- raise RunAnsibleModuleFail("run module {} failed".format(self.module_name), res)
-
- return res
-
-
-class Localhost(AnsibleHostBase):
- """
- @summary: Class for localhost
-
- For running ansible module on localhost
- """
- def __init__(self, ansible_adhoc):
- AnsibleHostBase.__init__(self, ansible_adhoc, "localhost")
-
-
-class PTFHost(AnsibleHostBase):
- """
- @summary: Class for PTF
-
- Instance of this class can run ansible modules on the PTF host.
- """
- def __init__(self, ansible_adhoc, hostname):
- AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
-
- # TODO: Add a method for running PTF script
-
class SonicHost(AnsibleHostBase):
"""
@@ -390,7 +288,7 @@ def is_service_fully_started(self, service):
except:
return False
- def is_container_present(self, service):
+ def is_container_running(self, service):
"""
Checks where a container exits.
@@ -400,8 +298,7 @@ def is_container_present(self, service):
True or False
"""
status = self.command(
- "docker ps -f name={}".format(service),
- module_ignore_errors=True
+ "docker ps -f name={}".format(service), module_ignore_errors=True
)
if len(status["stdout_lines"]) > 1:
@@ -409,7 +306,7 @@ def is_container_present(self, service):
service, status["stdout"])
)
else:
- logging.info("container {} does not exist".format(service))
+ logging.info("container {} is not running".format(service))
return len(status["stdout_lines"]) > 1
@@ -436,7 +333,7 @@ def get_monit_services_status(self):
"""
monit_services_status = {}
- services_status_result = self.shell("sudo monit status", module_ignore_errors=True)
+ services_status_result = self.shell("sudo monit status", module_ignore_errors=True, verbose=False)
exit_code = services_status_result["rc"]
if exit_code != 0:
@@ -666,6 +563,43 @@ def get_swss_docker_names(self):
swss_docker_names.append("swss{}".format(asic))
return swss_docker_names
+ def get_namespace_ids(self, container_name):
+ """
+ Gets ids of namespace where the container should reside in.
+
+ Returns:
+ A list contains ids of namespace such as [DEFAULT_ASIC_ID, "0", "1", ...]}
+ """
+ has_global_scope = ""
+ has_per_asic_scope = ""
+ namespace_ids = []
+
+ num_asics = int(self.facts["num_asic"])
+ command_config_entry = "sonic-db-cli CONFIG_DB hgetall \"FEATURE|{}\"".format(container_name)
+ command_output = self.shell(command_config_entry)
+ exit_code = command_output["rc"]
+ if exit_code != 0:
+ return namespace_ids, False
+
+ config_info = command_output["stdout_lines"]
+ for index, item in enumerate(config_info):
+ if item == "has_global_scope":
+ has_global_scope = config_info[index + 1]
+ elif item == "has_per_asic_scope":
+ has_per_asic_scope = config_info[index + 1]
+
+ if num_asics > 1:
+ if has_global_scope == "True":
+ namespace_ids.append(DEFAULT_ASIC_ID)
+ if has_per_asic_scope == "True":
+ for asic_id in range(0, num_asics):
+ namespace_ids.append(str(asic_id))
+ else:
+ namespace_ids.append(DEFAULT_ASIC_ID)
+
+ return namespace_ids, True
+
+
def get_up_time(self):
up_time_text = self.command("uptime -s")["stdout"]
return datetime.strptime(up_time_text, "%Y-%m-%d %H:%M:%S")
@@ -1136,19 +1070,16 @@ def get_extended_minigraph_facts(self, tbinfo):
map = tbinfo['topo']['ptf_map'][str(dut_index)]
if map:
for port, index in mg_facts['minigraph_port_indices'].items():
- mg_facts['minigraph_ptf_indices'][port] = map[str(index)]
+ if str(index) in map:
+ mg_facts['minigraph_ptf_indices'][port] = map[str(index)]
except (ValueError, KeyError):
pass
return mg_facts
- def get_route(self, prefix):
- cmd = 'show bgp ipv4' if ipaddress.ip_network(unicode(prefix)).version == 4 else 'show bgp ipv6'
- return json.loads(self.shell('vtysh -c "{} {} json"'.format(cmd, prefix))['stdout'])
-
def run_redis_cli_cmd(self, redis_cmd):
cmd = "/usr/bin/redis-cli {}".format(redis_cmd)
- return self.command(cmd)
+ return self.command(cmd, verbose=False)
def get_asic_name(self):
asic = "unknown"
@@ -1162,11 +1093,13 @@ def get_asic_name(self):
asic = "td2"
elif "Broadcom Limited Device b870" in output:
asic = "td3"
+ elif "Broadcom Limited Device b980" in output:
+ asic = "th3"
return asic
def get_running_config_facts(self):
- return self.config_facts(host=self.hostname, source='running')['ansible_facts']
+ return self.config_facts(host=self.hostname, source='running', verbose=False)['ansible_facts']
def get_vlan_intfs(self):
'''
@@ -1291,14 +1224,22 @@ def stop_service(self, service_name, docker_name):
logging.debug("Stopped {}".format(service_name))
def delete_container(self, service):
- if self.is_container_present(service):
- self.command("docker rm {}".format(service))
+ self.command(
+ "docker rm {}".format(service), module_ignore_errors=True
+ )
def is_bgp_state_idle(self):
+ """
+ Check if all BGP peers are in IDLE state.
+
+ Returns:
+ True or False
+ """
bgp_summary = self.command("show ip bgp summary")["stdout_lines"]
idle_count = 0
expected_idle_count = 0
+ bgp_monitor_count = 0
for line in bgp_summary:
if "Idle (Admin)" in line:
idle_count += 1
@@ -1307,9 +1248,20 @@ def is_bgp_state_idle(self):
tokens = line.split()
expected_idle_count = int(tokens[-1])
- return idle_count == expected_idle_count
+ if "BGPMonitor" in line:
+ bgp_monitor_count += 1
+
+ return idle_count == (expected_idle_count - bgp_monitor_count)
def is_service_running(self, service_name, docker_name):
+ """
+ Check if service is running. Service can be a service within a docker
+
+ Args:
+ service name, docker name
+ Returns:
+ True or False
+ """
service_status = self.command(
"docker exec {} supervisorctl status {}".format(
docker_name, service_name
@@ -1323,1013 +1275,32 @@ def is_service_running(self, service_name, docker_name):
return "RUNNING" in service_status
-
-class K8sMasterHost(AnsibleHostBase):
- """
- @summary: Class for Ubuntu KVM that hosts Kubernetes master
-
- For running ansible module on the K8s Ubuntu KVM host
- """
-
- def __init__(self, ansible_adhoc, hostname, is_haproxy):
- """ Initialize an object for interacting with Ubuntu KVM using ansible modules
-
- Args:
- ansible_adhoc (): The pytest-ansible fixture
- hostname (string): hostname of the Ubuntu KVM
- is_haproxy (boolean): True if node is haproxy load balancer, False if node is backend master server
-
- """
- self.hostname = hostname
- self.is_haproxy = is_haproxy
- super(K8sMasterHost, self).__init__(ansible_adhoc, hostname)
- evars = {
- 'ansible_become_method': 'enable'
- }
- self.host.options['variable_manager'].extra_vars.update(evars)
-
- def check_k8s_master_ready(self):
- """
- @summary: check if all Kubernetes master node statuses reflect target state "Ready"
-
- """
- k8s_nodes_statuses = self.shell('kubectl get nodes | grep master', module_ignore_errors=True)["stdout_lines"]
- logging.info("k8s master node statuses: {}".format(k8s_nodes_statuses))
-
- for line in k8s_nodes_statuses:
- if "NotReady" in line:
- return False
- return True
-
- def shutdown_api_server(self):
- """
- @summary: Shuts down API server container on one K8sMasterHost server
-
- """
- self.shell('sudo systemctl stop kubelet')
- logging.info("Shutting down API server on backend master server hostname: {}".format(self.hostname))
- api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
- for id in api_server_container_ids:
- self.shell('sudo docker kill {}'.format(id))
- api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout"]
- assert not api_server_container_ids
-
- def start_api_server(self):
- """
- @summary: Starts API server container on one K8sMasterHost server
-
- """
- self.shell('sudo systemctl start kubelet')
- logging.info("Starting API server on backend master server hostname: {}".format(self.hostname))
- timeout_wait_secs = 60
- poll_wait_secs = 5
- api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
- while ((len(api_server_container_ids) < 2) and (timeout_wait_secs > 0)):
- logging.info("Waiting for Kubernetes API server to start")
- time.sleep(poll_wait_secs)
- timeout_wait_secs -= poll_wait_secs
- api_server_container_ids = self.shell('sudo docker ps -qf "name=apiserver"')["stdout_lines"]
- assert len(api_server_container_ids) > 1
-
- def ensure_kubelet_running(self):
- """
- @summary: Ensures kubelet is running on one K8sMasterHost server
-
- """
- logging.info("Ensuring kubelet is started on {}".format(self.hostname))
- kubelet_status = self.shell("sudo systemctl status kubelet | grep 'Active: '", module_ignore_errors=True)
- for line in kubelet_status["stdout_lines"]:
- if not "running" in line:
- self.shell("sudo systemctl start kubelet")
-
-
-class K8sMasterCluster(object):
- """
- @summary: Class that encapsulates Kubernetes master cluster
-
- For operating on a group of K8sMasterHost objects that compose one HA Kubernetes master cluster
- """
-
- def __init__(self, k8smasters):
- """Initialize a list of backend master servers, and identify the HAProxy load balancer node
-
- Args:
- k8smasters: fixture that allows retrieval of K8sMasterHost objects
-
- """
- self.backend_masters = []
- for hostname, k8smaster in k8smasters.items():
- if k8smaster['host'].is_haproxy:
- self.haproxy = k8smaster['host']
- else:
- self.backend_masters.append(k8smaster)
-
- @property
- def vip(self):
- """
- @summary: Retrieve VIP of Kubernetes master cluster
-
- """
- return self.haproxy.mgmt_ip
-
- def shutdown_all_api_server(self):
- """
- @summary: shut down API server on all backend master servers
-
- """
- for k8smaster in self.backend_masters:
- logger.info("Shutting down API Server on master node {}".format(k8smaster['host'].hostname))
- k8smaster['host'].shutdown_api_server()
-
- def start_all_api_server(self):
- """
- @summary: Start API server on all backend master servers
-
- """
- for k8smaster in self.backend_masters:
- logger.info("Starting API server on master node {}".format(k8smaster['host'].hostname))
- k8smaster['host'].start_api_server()
-
- def check_k8s_masters_ready(self):
+ def remove_ssh_tunnel_sai_rpc(self):
"""
- @summary: Ensure that Kubernetes master is in healthy state
-
- """
- for k8smaster in self.backend_masters:
- assert k8smaster['host'].check_k8s_master_ready()
-
- def ensure_all_kubelet_running(self):
- """
- @summary: Ensures kubelet is started on all backend masters, start kubelet if necessary
-
- """
- for k8smaster in self.backend_masters:
- k8smaster['host'].ensure_kubelet_running()
-
-
-class EosHost(AnsibleHostBase):
- """
- @summary: Class for Eos switch
-
- For running ansible module on the Eos switch
- """
-
- def __init__(self, ansible_adhoc, hostname, eos_user, eos_passwd, shell_user=None, shell_passwd=None, gather_facts=False):
- '''Initialize an object for interacting with EoS type device using ansible modules
-
- Args:
- ansible_adhoc (): The pytest-ansible fixture
- hostname (string): hostname of the EOS device
- eos_user (string): Username for accessing the EOS CLI interface
- eos_passwd (string): Password for the eos_user
- shell_user (string, optional): Username for accessing the Linux shell CLI interface. Defaults to None.
- shell_passwd (string, optional): Password for the shell_user. Defaults to None.
- gather_facts (bool, optional): Whether to gather some basic facts. Defaults to False.
- '''
- self.eos_user = eos_user
- self.eos_passwd = eos_passwd
- self.shell_user = shell_user
- self.shell_passwd = shell_passwd
- AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
- self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
-
- def __getattr__(self, module_name):
- if module_name.startswith('eos_'):
- evars = {
- 'ansible_connection':'network_cli',
- 'ansible_network_os':'eos',
- 'ansible_user': self.eos_user,
- 'ansible_password': self.eos_passwd,
- 'ansible_ssh_user': self.eos_user,
- 'ansible_ssh_pass': self.eos_passwd,
- 'ansible_become_method': 'enable'
- }
- else:
- if not self.shell_user or not self.shell_passwd:
- raise Exception("Please specify shell_user and shell_passwd for {}".format(self.hostname))
- evars = {
- 'ansible_connection':'ssh',
- 'ansible_network_os':'linux',
- 'ansible_user': self.shell_user,
- 'ansible_password': self.shell_passwd,
- 'ansible_ssh_user': self.shell_user,
- 'ansible_ssh_pass': self.shell_passwd,
- 'ansible_become_method': 'sudo'
- }
- self.host.options['variable_manager'].extra_vars.update(evars)
- return super(EosHost, self).__getattr__(module_name)
-
- def shutdown(self, interface_name):
- out = self.eos_config(
- lines=['shutdown'],
- parents='interface %s' % interface_name)
- logging.info('Shut interface [%s]' % interface_name)
- return out
-
- def no_shutdown(self, interface_name):
- out = self.eos_config(
- lines=['no shutdown'],
- parents='interface %s' % interface_name)
- logging.info('No shut interface [%s]' % interface_name)
- return out
-
- def check_intf_link_state(self, interface_name):
- show_int_result = self.eos_command(
- commands=['show interface %s' % interface_name])
- return 'Up' in show_int_result['stdout_lines'][0]
-
- def set_interface_lacp_rate_mode(self, interface_name, mode):
- out = self.eos_config(
- lines=['lacp rate %s' % mode],
- parents='interface %s' % interface_name)
-
- if out['failed'] == True:
- # new eos deprecate lacp rate and use lacp timer command
- out = self.eos_config(
- lines=['lacp timer %s' % mode],
- parents='interface %s' % interface_name)
- if out['changed'] == False:
- logging.warning("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
- raise Exception("Unable to set interface [%s] lacp timer to [%s]" % (interface_name, mode))
- else:
- logging.info("Set interface [%s] lacp timer to [%s]" % (interface_name, mode))
- else:
- logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode))
- return out
-
- def kill_bgpd(self):
- out = self.eos_config(lines=['agent Rib shutdown'])
- return out
-
- def start_bgpd(self):
- out = self.eos_config(lines=['no agent Rib shutdown'])
- return out
-
- def check_bgp_session_state(self, neigh_ips, neigh_desc, state="established"):
- """
- @summary: check if current bgp session equals to the target state
-
- @param neigh_ips: bgp neighbor IPs
- @param neigh_desc: bgp neighbor description
- @param state: target state
- """
- neigh_ips = [ip.lower() for ip in neigh_ips]
- neigh_ips_ok = []
- neigh_desc_ok = []
- neigh_desc_available = False
-
- out_v4 = self.eos_command(
- commands=['show ip bgp summary | json'])
- logging.info("ip bgp summary: {}".format(out_v4))
-
- out_v6 = self.eos_command(
- commands=['show ipv6 bgp summary | json'])
- logging.info("ipv6 bgp summary: {}".format(out_v6))
-
- for k, v in out_v4['stdout'][0]['vrfs']['default']['peers'].items():
- if v['peerState'].lower() == state.lower():
- if k in neigh_ips:
- neigh_ips_ok.append(k)
- if 'description' in v:
- neigh_desc_available = True
- if v['description'] in neigh_desc:
- neigh_desc_ok.append(v['description'])
-
- for k, v in out_v6['stdout'][0]['vrfs']['default']['peers'].items():
- if v['peerState'].lower() == state.lower():
- if k.lower() in neigh_ips:
- neigh_ips_ok.append(k)
- if 'description' in v:
- neigh_desc_available = True
- if v['description'] in neigh_desc:
- neigh_desc_ok.append(v['description'])
- logging.info("neigh_ips_ok={} neigh_desc_available={} neigh_desc_ok={}"\
- .format(str(neigh_ips_ok), str(neigh_desc_available), str(neigh_desc_ok)))
- if neigh_desc_available:
- if len(neigh_ips) == len(neigh_ips_ok) and len(neigh_desc) == len(neigh_desc_ok):
- return True
- else:
- if len(neigh_ips) == len(neigh_ips_ok):
- return True
-
- return False
-
- def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
- playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvvvv'
- cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory,
- fanout_host=self.hostname, extra_vars=json.dumps(kwargs))
- res = self.localhost.shell(cli_cmd)
-
- if res["localhost"]["rc"] != 0:
- raise Exception("Unable to execute template\n{}".format(res["stdout"]))
-
- def get_route(self, prefix):
- cmd = 'show ip bgp' if ipaddress.ip_network(unicode(prefix)).version == 4 else 'show ipv6 bgp'
- return self.eos_command(commands=[{
- 'command': '{} {}'.format(cmd, prefix),
- 'output': 'json'
- }])['stdout'][0]
-
-
-class OnyxHost(AnsibleHostBase):
- """
- @summary: Class for ONYX switch
-
- For running ansible module on the ONYX switch
- """
-
- def __init__(self, ansible_adhoc, hostname, user, passwd, gather_facts=False):
- AnsibleHostBase.__init__(self, ansible_adhoc, hostname, connection="network_cli")
- evars = {'ansible_connection':'network_cli',
- 'ansible_network_os':'onyx',
- 'ansible_user': user,
- 'ansible_password': passwd,
- 'ansible_ssh_user': user,
- 'ansible_ssh_pass': passwd,
- 'ansible_become_method': 'enable'
- }
-
- self.host.options['variable_manager'].extra_vars.update(evars)
- self.localhost = ansible_adhoc(inventory='localhost', connection='local', host_pattern="localhost")["localhost"]
-
- def shutdown(self, interface_name):
- out = self.host.onyx_config(
- lines=['shutdown'],
- parents='interface %s' % interface_name)
- logging.info('Shut interface [%s]' % interface_name)
- return out
-
- def no_shutdown(self, interface_name):
- out = self.host.onyx_config(
- lines=['no shutdown'],
- parents='interface %s' % interface_name)
- logging.info('No shut interface [%s]' % interface_name)
- return out
-
- def check_intf_link_state(self, interface_name):
- show_int_result = self.host.onyx_command(
- commands=['show interfaces ethernet {} | include "Operational state"'.format(interface_name)])[self.hostname]
- return 'Up' in show_int_result['stdout'][0]
-
- def command(self, cmd):
- out = self.host.onyx_command(commands=[cmd])
- return out
-
- def set_interface_lacp_rate_mode(self, interface_name, mode):
- out = self.host.onyx_config(
- lines=['lacp rate %s' % mode],
- parents='interface ethernet %s' % interface_name)
- logging.info("Set interface [%s] lacp rate to [%s]" % (interface_name, mode))
- return out
-
- def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
- """
- Execute ansible playbook with specified parameters
- """
- playbook_template = 'cd {ansible_path}; ansible-playbook {playbook} -i {inventory} -l {fanout_host} --extra-vars \'{extra_vars}\' -vvvvv'
- cli_cmd = playbook_template.format(ansible_path=ansible_root, playbook=ansible_playbook, inventory=inventory,
- fanout_host=self.hostname, extra_vars=json.dumps(kwargs))
- res = self.localhost.shell(cli_cmd)
-
- if res["localhost"]["rc"] != 0:
- raise Exception("Unable to execute template\n{}".format(res["localhost"]["stdout"]))
-
-
-class IxiaHost (AnsibleHostBase):
- """ This class is a place-holder for running ansible module on Ixia
- fanout devices in future (TBD).
- """
- def __init__ (self, ansible_adhoc, os, hostname, device_type) :
- """ Initializing Ixia fanout host for using ansible modules.
-
- Note: Right now, it is just a place holder.
-
- Args:
- ansible_adhoc :The pytest-ansible fixture
- os (str): The os type of Ixia Fanout.
- hostname (str): The Ixia fanout host-name
- device_type (str): The Ixia fanout device type.
- """
-
- self.ansible_adhoc = ansible_adhoc
- self.os = os
- self.hostname = hostname
- self.device_type = device_type
- super().__init__(IxiaHost, self)
-
- def get_host_name (self):
- """Returns the Ixia hostname
-
- Args:
- This function takes no argument.
- """
- return self.hostname
-
- def get_os (self) :
- """Returns the os type of the ixia device.
-
- Args:
- This function takes no argument.
- """
- return self.os
-
- def execute (self, cmd) :
- """Execute a given command on ixia fanout host.
-
- Args:
- cmd (str): Command to be executed.
- """
- if (self.os == 'ixia') :
- eval(cmd)
-
-
-class SonicAsic(object):
- """ This class represents an ASIC on a SONiC host. This class implements wrapper methods for ASIC/namespace related operations.
- The purpose is to hide the complexity of handling ASIC/namespace specific details.
- For example, passing asic_id, namespace, instance_id etc. to ansible module to deal with namespaces.
- """
-
- _DEFAULT_ASIC_SERVICES = ["bgp", "database", "lldp", "swss", "syncd", "teamd"]
- _MULTI_ASIC_SERVICE_NAME = "{}@{}" # service name, asic_id
- _MULTI_ASIC_DOCKER_NAME = "{}{}" # docker name, asic_id
-
- def __init__(self, sonichost, asic_index):
- """ Initializing a ASIC on a SONiC host.
-
- Args:
- sonichost : SonicHost object to which this asic belongs
- asic_index: ASIC / namespace id for this asic.
- """
- self.sonichost = sonichost
- self.asic_index = asic_index
- if self.sonichost.is_multi_asic:
- self.namespace = "{}{}".format(NAMESPACE_PREFIX, self.asic_index)
- self.cli_ns_option = "-n {}".format(self.namespace)
- else:
- # set the namespace to DEFAULT_NAMESPACE(None) for single asic
- self.namespace = DEFAULT_NAMESPACE
- self.cli_ns_option = ""
-
- def get_critical_services(self):
- """This function returns the list of the critical services
- for the namespace(asic)
-
- If the dut is multi asic, then the asic_id is appended t0 the
- _DEFAULT_ASIC_SERVICES list
- Returns:
- [list]: list of the services running the namespace/asic
- """
- a_service = []
- for service in self._DEFAULT_ASIC_SERVICES:
- a_service.append("{}{}".format(
- service, self.asic_index if self.sonichost.is_multi_asic else ""))
- return a_service
-
- def get_service_name(self, service):
- service_name = "{}{}".format(service, "@{}".format(self.asic_index) if self.sonichost.is_multi_asic else "")
- return service_name
-
- def is_it_frontend(self):
- if self.sonichost.is_multi_asic:
- sub_role_cmd = 'sudo sonic-cfggen -d -v DEVICE_METADATA.localhost.sub_role -n {}'.format(self.namespace)
- sub_role = self.sonichost.shell(sub_role_cmd)["stdout_lines"][0].decode("utf-8")
- if sub_role is not None and sub_role.lower() == 'frontend':
- return True
- return False
-
- def is_it_backend(self):
- if self.sonichost.is_multi_asic:
- sub_role_cmd = 'sudo sonic-cfggen -d -v DEVICE_METADATA.localhost.sub_role -n {}'.format(self.namespace)
- sub_role = self.sonichost.shell(sub_role_cmd)["stdout_lines"][0].decode("utf-8")
- if sub_role is not None and sub_role.lower() == 'backend':
- return True
- return False
-
- def get_docker_cmd(self, cmd, container_name):
- if self.sonichost.is_multi_asic:
- return "sudo docker exec {}{} {}".format(container_name, self.asic_index, cmd)
- return cmd
-
- def get_asic_namespace(self):
- if self.sonichost.is_multi_asic:
- return self.namespace
- return DEFAULT_NAMESPACE
-
- def bgp_facts(self, *module_args, **complex_args):
- """ Wrapper method for bgp_facts ansible module.
- If number of asics in SonicHost are more than 1, then add 'instance_id' param for this Asic
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
+ Removes any ssh tunnels if present created for syncd RPC communication
Returns:
- if SonicHost has only 1 asic, then return the bgp_facts for the global namespace, else bgp_facts for the bgp instance for my asic_index.
+ None
"""
- if self.sonichost.facts['num_asic'] != 1:
- complex_args['instance_id'] = self.asic_index
- return self.sonichost.bgp_facts(*module_args, **complex_args)
-
- def config_facts(self, *module_args, **complex_args):
- """ Wrapper method for config_facts ansible module.
- If number of asics in SonicHost are more than 1, then add 'namespace' param for this Asic
- If 'host' is not specified in complex_args, add it - as it is a mandatory param for the config_facts module
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
-
- Returns:
- if SonicHost has only 1 asic, then return the config_facts for the global namespace, else config_facts for namespace for my asic_index.
- """
- if 'host' not in complex_args:
- complex_args['host'] = self.sonichost.hostname
- if self.sonichost.is_multi_asic:
- complex_args['namespace'] = self.namespace
- return self.sonichost.config_facts(*module_args, **complex_args)
-
- def show_interface(self, *module_args, **complex_args):
- """Wrapper for the ansible module 'show_interface'
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
-
- Returns:
- [dict]: [the output of show interface status command]
- """
- complex_args['namespace'] = self.namespace
- return self.sonichost.show_interface(*module_args, **complex_args)
-
- def show_ip_interface(self, *module_args, **complex_args):
- """Wrapper for the ansible module 'show_ip_interface'
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
-
- Returns:
- [dict]: [the output of show interface status command]
- """
- complex_args['namespace'] = self.namespace
- return self.sonichost.show_ip_interface(*module_args, **complex_args)
-
- def run_redis_cli_cmd(self, redis_cmd):
- if self.namespace != DEFAULT_NAMESPACE:
- redis_cli = "/usr/bin/redis-cli"
- cmd = "sudo ip netns exec {} {} {}".format(self.namespace, redis_cli,redis_cmd)
- return self.sonichost.command(cmd)
- # for single asic platforms there are not Namespaces, so the redis-cli command is same the DUT host
- return self.sonichost.run_redis_cli_cmd(redis_cmd)
-
- def get_ip_route_info(self, dstip):
- return self.sonichost.get_ip_route_info(dstip, self.cli_ns_option)
-
- @property
- def os_version(self):
- return self.sonichost.os_version
-
- def interface_facts(self, *module_args, **complex_args):
- """Wrapper for the interface_facts ansible module.
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
-
- Returns:
- For a single ASIC platform, the namespace = DEFAULT_NAMESPACE, will retrieve interface facts for the global namespace
- In case of multi-asic, if namespace = , will retrieve interface facts for that namespace.
- """
- complex_args['namespace'] = self.namespace
- return self.sonichost.interface_facts(*module_args, **complex_args)
-
-
- def stop_service(self, service):
- if not self.sonichost.is_multi_asic:
- service_name = service
- docker_name = service
- else:
- service_name = self._MULTI_ASIC_SERVICE_NAME.format(
- service, self.asic_index
- )
- docker_name = self._MULTI_ASIC_DOCKER_NAME.format(
- service, self.asic_index
- )
- return self.sonichost.stop_service(service_name, docker_name)
-
- def delete_container(self, service):
- if self.sonichost.is_multi_asic:
- service = self._MULTI_ASIC_DOCKER_NAME.format(
- service, self.asic_index
- )
- return self.sonichost.delete_container(service)
-
- def is_container_present(self, service):
- if self.sonichost.is_multi_asic:
- service = self._MULTI_ASIC_DOCKER_NAME.format(
- service, self.asic_index
- )
- return self.sonichost.is_container_present(service)
-
- def is_service_running(self, service_name, docker_name):
- if self.sonichost.is_multi_asic:
- docker_name = self._MULTI_ASIC_DOCKER_NAME.format(
- docker_name, self.asic_index
- )
- return self.sonichost.is_service_running(service_name, docker_name)
-
-
-class MultiAsicSonicHost(object):
- """ This class represents a Multi-asic SonicHost It has two attributes:
- sonic_host: a SonicHost instance. This object is for interacting with the SONiC host through pytest_ansible.
- asics: a list of SonicAsic instances.
-
- The 'duthost' fixture will return an instance of a MultiAsicSonicHost.
- So, even a single asic pizza box is represented as a MultiAsicSonicHost with 1 SonicAsic.
- """
-
- _DEFAULT_SERVICES = ["pmon", "snmp", "lldp", "database"]
-
- def __init__(self, ansible_adhoc, hostname):
- """ Initializing a MultiAsicSonicHost.
-
- Args:
- ansible_adhoc : The pytest-ansible fixture
- hostname: Name of the host in the ansible inventory
- """
- self.sonichost = SonicHost(ansible_adhoc, hostname)
- self.asics = [SonicAsic(self.sonichost, asic_index) for asic_index in range(self.sonichost.facts["num_asic"])]
-
- # Get the frontend and backend asics in a multiAsic device.
- self.frontend_asics = []
- self.backend_asics = []
- if self.sonichost.is_multi_asic:
- for asic in self.asics:
- if asic.is_it_frontend():
- self.frontend_asics.append(asic)
- elif asic.is_it_backend():
- self.backend_asics.append(asic)
-
- self.critical_services_tracking_list()
-
- def critical_services_tracking_list(self):
- """Get the list of services running on the DUT
- The services on the sonic devices are:
- - services running on the host
- - services which are replicated per asic
- Returns:
- [list]: list of the services running the device
- """
- service_list = []
- service_list+= self._DEFAULT_SERVICES
- for asic in self.asics:
- service_list += asic.get_critical_services()
- self.sonichost.reset_critical_services_tracking_list(service_list)
-
- def get_default_critical_services_list(self):
- return self._DEFAULT_SERVICES
-
- def _run_on_asics(self, *module_args, **complex_args):
- """ Run an asible module on asics based on 'asic_index' keyword in complex_args
-
- Args:
- module_args: other ansible module args passed from the caller
- complex_args: other ansible keyword args
-
- Raises:
- ValueError: if asic_index is specified and it is neither an int or string 'all'.
- ValueError: if asic_index is specified and is an int, but greater than number of asics in the SonicHost
-
- Returns:
- if asic_index is not specified, then we return the output of the ansible module on global namespace (using SonicHost)
- else
- if asic_index is an int, the output of the ansible module on that asic namespace
- - for single asic SonicHost this would still be the same as the ansible module on the global namespace
- else if asic_index is string 'all', then a list of ansible module output for all the asics on the SonicHost
- - for single asic, this would be a list of size 1.
- """
- if "asic_index" not in complex_args:
- # Default ASIC/namespace
- return getattr(self.sonichost, self.multi_asic_attr)(*module_args, **complex_args)
- else:
- asic_complex_args = copy.deepcopy(complex_args)
- asic_index = asic_complex_args.pop("asic_index")
- if type(asic_index) == int:
- # Specific ASIC/namespace
- if self.sonichost.facts['num_asic'] == 1:
- if asic_index != 0:
- raise ValueError("Trying to run module '{}' against asic_index '{}' on a single asic dut '{}'".format(self.multi_asic_attr, asic_index, self.sonichost.hostname))
- return getattr(self.asics[asic_index], self.multi_asic_attr)(*module_args, **asic_complex_args)
- elif type(asic_index) == str and asic_index.lower() == "all":
- # All ASICs/namespace
- return [getattr(asic, self.multi_asic_attr)(*module_args, **asic_complex_args) for asic in self.asics]
- else:
- raise ValueError("Argument 'asic_index' must be an int or string 'all'.")
-
- def get_frontend_asic_ids(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_ASIC_ID]
-
- return [asic.asic_index for asic in self.frontend_asics]
-
- def get_frontend_asic_namespace_list(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_NAMESPACE]
-
- return [asic.namespace for asic in self.frontend_asics]
-
- def get_backend_asic_ids(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_ASIC_ID]
-
- return [asic.asic_index for asic in self.backend_asics]
-
- def get_backend_asic_namespace_list(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_NAMESPACE]
-
- return [asic.namespace for asic in self.backend_asics]
-
- def get_asic_ids(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_ASIC_ID]
-
- return [asic.asic_index for asic in self.asics]
-
- def get_asic_namespace_list(self):
- if self.sonichost.facts['num_asic'] == 1:
- return [DEFAULT_NAMESPACE]
-
- return [asic.namespace for asic in self.asics]
-
- def get_asic_id_from_namespace(self, namespace):
- if self.sonichost.facts['num_asic'] == 1:
- return DEFAULT_ASIC_ID
-
- for asic in self.asics:
- if namespace == asic.namespace:
- return asic.asic_index
-
- # Raise an error if we reach here
- raise ValueError("Invalid namespace '{}' passed as input".format(namespace))
-
- def get_namespace_from_asic_id(self, asic_id):
- if self.sonichost.facts['num_asic'] == 1:
- return DEFAULT_NAMESPACE
-
- for asic in self.asics:
- if asic_id == asic.asic_index:
- return asic.namespace
-
- # Raise an error if we reach here
- raise ValueError("Invalid asic_id '{}' passed as input".format(asic_id))
-
- def get_vtysh_cmd_for_namespace(self, cmd, namespace):
- asic_id = self.get_asic_id_from_namespace(namespace)
- if asic_id == DEFAULT_ASIC_ID:
- return cmd
- ns_cmd = cmd.replace('vtysh', 'vtysh -n {}'.format(asic_id))
- return ns_cmd
-
- def __getattr__(self, attr):
- """ To support calling an ansible module on a MultiAsicSonicHost.
-
- Args:
- attr: attribute to get
-
- Returns:
- if attr doesn't start with '_' and is a method of SonicAsic, attr will be ansible module that has dependency on ASIC,
- return the output of the ansible module on asics requested - using _run_on_asics method.
- else
- return the attribute from SonicHost.
- """
- sonic_asic_attr = getattr(SonicAsic, attr, None)
- if not attr.startswith("_") and sonic_asic_attr and callable(sonic_asic_attr):
- self.multi_asic_attr = attr
- return self._run_on_asics
- else:
- return getattr(self.sonichost, attr) # For backward compatibility
-
- def get_asic(self, asic_id):
- if asic_id == DEFAULT_ASIC_ID:
- return self.asics[0]
- return self.asics[asic_id]
-
- def stop_service(self, service):
- if service in self._DEFAULT_SERVICES:
- return self.sonichost.stop_service(service, service)
-
- for asic in self.asics:
- asic.stop_service(service)
-
- def delete_container(self, service):
- if service in self._DEFAULT_SERVICES:
- return self.sonichost.delete_container(service)
-
- for asic in self.asics:
- asic.delete_container(service)
-
- def is_container_present(self, service):
- if service in self._DEFAULT_SERVICES:
- return self.sonichost.is_container_present(service)
-
- for asic in self.asics:
- if asic.is_container_present(service):
- return True
-
- return False
-
- def is_bgp_state_idle(self):
- return self.sonichost.is_bgp_state_idle()
-
- def is_service_running(self, service_name, docker_name=None):
- docker_name = service_name if docker_name is None else docker_name
-
- if docker_name in self._DEFAULT_SERVICES:
- return self.sonichost.is_service_running(service_name, docker_name)
-
- for asic in self.asics:
- if not asic.is_service_running(service_name, docker_name):
- return False
-
- return True
-
-
-class DutHosts(object):
- """ Represents all the DUTs (nodes) in a testbed. class has 3 important attributes:
- nodes: List of all the MultiAsicSonicHost instances for all the SONiC nodes (or cards for chassis) in a multi-dut testbed
- frontend_nodes: subset of nodes and holds list of MultiAsicSonicHost instances for DUTs with front-panel ports (like linecards in chassis
- supervisor_nodes: subset of nodes and holds list of MultiAsicSonicHost instances for supervisor cards.
- """
- class _Nodes(list):
- """ Internal class representing a list of MultiAsicSonicHosts """
- def _run_on_nodes(self, *module_args, **complex_args):
- """ Delegate the call to each of the nodes, return the results in a dict."""
- return {node.hostname: getattr(node, self.attr)(*module_args, **complex_args) for node in self}
-
- def __getattr__(self, attr):
- """ To support calling ansible modules on a list of MultiAsicSonicHost
- Args:
- attr: attribute to get
-
- Returns:
- a dictionary with key being the MultiAsicSonicHost's hostname, and value being the output of ansible module
- on that MultiAsicSonicHost
- """
- self.attr = attr
- return self._run_on_nodes
-
- def __eq__(self, o):
- """ To support eq operator on the DUTs (nodes) in the testbed """
- return list.__eq__(o)
-
- def __ne__(self, o):
- """ To support ne operator on the DUTs (nodes) in the testbed """
- return list.__ne__(o)
-
- def __hash__(self):
- """ To support hash operator on the DUTs (nodes) in the testbed """
- return list.__hash__()
-
- def __init__(self, ansible_adhoc, tbinfo):
- """ Initialize a multi-dut testbed with all the DUT's defined in testbed info.
-
- Args:
- ansible_adhoc: The pytest-ansible fixture
- tbinfo - Testbed info whose "duts" holds the hostnames for the DUT's in the multi-dut testbed.
-
- """
- # TODO: Initialize the nodes in parallel using multi-threads?
- self.nodes = self._Nodes([MultiAsicSonicHost(ansible_adhoc, hostname) for hostname in tbinfo["duts"]])
- self.supervisor_nodes = self._Nodes([node for node in self.nodes if node.is_supervisor_node()])
- self.frontend_nodes = self._Nodes([node for node in self.nodes if node.is_frontend_node()])
-
- def __getitem__(self, index):
- """To support operations like duthosts[0] and duthost['sonic1_hostname']
-
- Args:
- index (int or string): Index or hostname of a duthost.
-
- Raises:
- KeyError: Raised when duthost with supplied hostname is not found.
- IndexError: Raised when duthost with supplied index is not found.
-
- Returns:
- [MultiAsicSonicHost]: Returns the specified duthost in duthosts. It is an instance of MultiAsicSonicHost.
- """
- if type(index) == int:
- return self.nodes[index]
- elif type(index) in [ str, unicode ]:
- for node in self.nodes:
- if node.hostname == index:
- return node
- raise KeyError("No node has hostname '{}'".format(index))
- else:
- raise IndexError("Bad index '{}' type {}".format(index, type(index)))
-
- # Below method are to support treating an instance of DutHosts as a list
- def __iter__(self):
- """ To support iteration over all the DUTs (nodes) in the testbed"""
- return iter(self.nodes)
-
- def __len__(self):
- """ To support length of the number of DUTs (nodes) in the testbed """
- return len(self.nodes)
-
- def __eq__(self, o):
- """ To support eq operator on the DUTs (nodes) in the testbed """
- return self.nodes.__eq__(o)
-
- def __ne__(self, o):
- """ To support ne operator on the DUTs (nodes) in the testbed """
- return self.nodes.__ne__(o)
-
- def __hash__(self):
- """ To support hash operator on the DUTs (nodes) in the testbed """
- return self.nodes.__hash__()
-
- def __getattr__(self, attr):
- """To support calling ansible modules directly on all the DUTs (nodes) in the testbed
- Args:
- attr: attribute to get
-
- Returns:
- a dictionary with key being the MultiAsicSonicHost's hostname, and value being the output of ansible module
- on that MultiAsicSonicHost
- """
- return getattr(self.nodes, attr)
-
- def config_facts(self, *module_args, **complex_args):
- result = {}
- for node in self.nodes:
- complex_args['host'] = node.hostname
- result[node.hostname] = node.config_facts(*module_args, **complex_args)['ansible_facts']
- return result
-
-
-class FanoutHost(object):
- """
- @summary: Class for Fanout switch
-
- For running ansible module on the Fanout switch
- """
-
- def __init__(self, ansible_adhoc, os, hostname, device_type, user, passwd, shell_user=None, shell_passwd=None):
- self.hostname = hostname
- self.type = device_type
- self.host_to_fanout_port_map = {}
- self.fanout_to_host_port_map = {}
- if os == 'sonic':
- self.os = os
- self.host = SonicHost(ansible_adhoc, hostname,
- shell_user=shell_user,
- shell_passwd=shell_passwd)
- elif os == 'onyx':
- self.os = os
- self.host = OnyxHost(ansible_adhoc, hostname, user, passwd)
- elif os == 'ixia':
- # TODO: add ixia chassis abstraction
- self.os = os
- self.host = IxiaHost(ansible_adhoc, os, hostname, device_type)
- else:
- # Use eos host if the os type is unknown
- self.os = 'eos'
- self.host = EosHost(ansible_adhoc, hostname, user, passwd, shell_user=shell_user, shell_passwd=shell_passwd)
-
- def __getattr__(self, module_name):
- return getattr(self.host, module_name)
-
- def get_fanout_os(self):
- return self.os
-
- def get_fanout_type(self):
- return self.type
-
- def shutdown(self, interface_name):
- return self.host.shutdown(interface_name)
-
- def no_shutdown(self, interface_name):
- return self.host.no_shutdown(interface_name)
-
- def __str__(self):
- return "{ os: '%s', hostname: '%s', device_type: '%s' }" % (self.os, self.hostname, self.type)
-
- def __repr__(self):
- return self.__str__()
+ try:
+ pid_list = self.shell(
+ 'pgrep -f "ssh -o StrictHostKeyChecking=no -fN -L \*:9092"'
+ )["stdout_lines"]
+ except RunAnsibleModuleFail:
+ return
+ for pid in pid_list:
+ self.shell("kill {}".format(pid))
- def add_port_map(self, host_port, fanout_port):
+ def get_up_ip_ports(self):
"""
- Fanout switch is build from the connection graph of the
- DUT. So each fanout switch instance is relevant to the
- DUT instance in the test. As result the port mapping is
- unique from the DUT perspective. However, this function
- need update when supporting multiple DUT
-
- host_port is a encoded string of |,
- e.g. sample_host|Ethernet0.
+        Get a list of all up IP interfaces
"""
- self.host_to_fanout_port_map[host_port] = fanout_port
- self.fanout_to_host_port_map[fanout_port] = host_port
-
- def exec_template(self, ansible_root, ansible_playbook, inventory, **kwargs):
- return self.host.exec_template(ansible_root, ansible_playbook, inventory, **kwargs)
+ up_ip_ports = []
+ ip_intf_facts = self.show_ip_interface()['ansible_facts']['ip_interfaces']
+ for intf in ip_intf_facts:
+ try:
+ if ip_intf_facts[intf]['oper_state'] == 'up':
+ up_ip_ports.append(intf)
+ except KeyError:
+ pass
+ return up_ip_ports
diff --git a/tests/common/devices/sonic_asic.py b/tests/common/devices/sonic_asic.py
new file mode 100644
index 00000000000..a0d3b38548b
--- /dev/null
+++ b/tests/common/devices/sonic_asic.py
@@ -0,0 +1,371 @@
+import logging
+import socket
+
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.constants import DEFAULT_NAMESPACE, NAMESPACE_PREFIX
+from tests.common.errors import RunAnsibleModuleFail
+from tests.common.platform.ssh_utils import ssh_authorize_local_user
+
+logger = logging.getLogger(__name__)
+
+
+class SonicAsic(object):
+ """ This class represents an ASIC on a SONiC host. This class implements wrapper methods for ASIC/namespace related operations.
+ The purpose is to hide the complexity of handling ASIC/namespace specific details.
+ For example, passing asic_id, namespace, instance_id etc. to ansible module to deal with namespaces.
+ """
+
+ _DEFAULT_ASIC_SERVICES = ["bgp", "database", "lldp", "swss", "syncd", "teamd"]
+ _MULTI_ASIC_SERVICE_NAME = "{}@{}" # service name, asic_id
+ _MULTI_ASIC_DOCKER_NAME = "{}{}" # docker name, asic_id
+
+ def __init__(self, sonichost, asic_index):
+ """ Initializing a ASIC on a SONiC host.
+
+ Args:
+ sonichost : SonicHost object to which this asic belongs
+ asic_index: ASIC / namespace id for this asic.
+ """
+ self.sonichost = sonichost
+ self.asic_index = asic_index
+ self._ns_arg = ""
+ if self.sonichost.is_multi_asic:
+ self.namespace = "{}{}".format(NAMESPACE_PREFIX, self.asic_index)
+ self.cli_ns_option = "-n {}".format(self.namespace)
+ self._ns_arg = "sudo ip netns exec {} ".format(self.namespace)
+ else:
+ # set the namespace to DEFAULT_NAMESPACE(None) for single asic
+ self.namespace = DEFAULT_NAMESPACE
+ self.cli_ns_option = ""
+
+ def get_critical_services(self):
+ """This function returns the list of the critical services
+ for the namespace(asic)
+
+        If the dut is multi asic, then the asic_id is appended to the
+ _DEFAULT_ASIC_SERVICES list
+ Returns:
+            [list]: list of the services running in the namespace/asic
+ """
+ a_service = []
+ for service in self._DEFAULT_ASIC_SERVICES:
+ a_service.append("{}{}".format(
+ service, self.asic_index if self.sonichost.is_multi_asic else ""))
+ return a_service
+
+ def is_it_frontend(self):
+ if self.sonichost.is_multi_asic:
+ sub_role_cmd = 'sudo sonic-cfggen -d -v DEVICE_METADATA.localhost.sub_role -n {}'.format(self.namespace)
+ sub_role = self.sonichost.shell(sub_role_cmd)["stdout_lines"][0].decode("utf-8")
+ if sub_role is not None and sub_role.lower() == 'frontend':
+ return True
+ return False
+
+ def is_it_backend(self):
+ if self.sonichost.is_multi_asic:
+ sub_role_cmd = 'sudo sonic-cfggen -d -v DEVICE_METADATA.localhost.sub_role -n {}'.format(self.namespace)
+ sub_role = self.sonichost.shell(sub_role_cmd)["stdout_lines"][0].decode("utf-8")
+ if sub_role is not None and sub_role.lower() == 'backend':
+ return True
+ return False
+
+ def get_docker_cmd(self, cmd, container_name):
+ if self.sonichost.is_multi_asic:
+ return "sudo docker exec {}{} {}".format(container_name, self.asic_index, cmd)
+ return cmd
+
+ def get_asic_namespace(self):
+ if self.sonichost.is_multi_asic:
+ return self.namespace
+ return DEFAULT_NAMESPACE
+
+ def bgp_facts(self, *module_args, **complex_args):
+ """ Wrapper method for bgp_facts ansible module.
+ If number of asics in SonicHost are more than 1, then add 'instance_id' param for this Asic
+
+ Args:
+ module_args: other ansible module args passed from the caller
+ complex_args: other ansible keyword args
+
+ Returns:
+ if SonicHost has only 1 asic, then return the bgp_facts for the global namespace, else bgp_facts for the bgp instance for my asic_index.
+ """
+ if self.sonichost.facts['num_asic'] != 1:
+ complex_args['instance_id'] = self.asic_index
+ return self.sonichost.bgp_facts(*module_args, **complex_args)
+
+ def config_facts(self, *module_args, **complex_args):
+ """ Wrapper method for config_facts ansible module.
+ If number of asics in SonicHost are more than 1, then add 'namespace' param for this Asic
+ If 'host' is not specified in complex_args, add it - as it is a mandatory param for the config_facts module
+
+ Args:
+ module_args: other ansible module args passed from the caller
+ complex_args: other ansible keyword args
+
+ Returns:
+ if SonicHost has only 1 asic, then return the config_facts for the global namespace, else config_facts for namespace for my asic_index.
+ """
+ if 'host' not in complex_args:
+ complex_args['host'] = self.sonichost.hostname
+ if self.sonichost.is_multi_asic:
+ complex_args['namespace'] = self.namespace
+ return self.sonichost.config_facts(*module_args, **complex_args)
+
+ def show_interface(self, *module_args, **complex_args):
+ """Wrapper for the ansible module 'show_interface'
+
+ Args:
+ module_args: other ansible module args passed from the caller
+ complex_args: other ansible keyword args
+
+ Returns:
+ [dict]: [the output of show interface status command]
+ """
+ complex_args['namespace'] = self.namespace
+ return self.sonichost.show_interface(*module_args, **complex_args)
+
+ def show_ip_interface(self, *module_args, **complex_args):
+ """Wrapper for the ansible module 'show_ip_interface'
+
+ Args:
+ module_args: other ansible module args passed from the caller
+ complex_args: other ansible keyword args
+
+ Returns:
+ [dict]: [the output of show interface status command]
+ """
+ complex_args['namespace'] = self.namespace
+ return self.sonichost.show_ip_interface(*module_args, **complex_args)
+
+ def run_redis_cli_cmd(self, redis_cmd):
+ if self.namespace != DEFAULT_NAMESPACE:
+ redis_cli = "/usr/bin/redis-cli"
+ cmd = "sudo ip netns exec {} {} {}".format(self.namespace, redis_cli,redis_cmd)
+ return self.sonichost.command(cmd, verbose=False)
+        # for single asic platforms there are no namespaces, so the redis-cli command is the same as on the DUT host
+ return self.sonichost.run_redis_cli_cmd(redis_cmd)
+
+ def get_ip_route_info(self, dstip):
+ return self.sonichost.get_ip_route_info(dstip, self.cli_ns_option)
+
+ @property
+ def os_version(self):
+ return self.sonichost.os_version
+
+ def interface_facts(self, *module_args, **complex_args):
+ """Wrapper for the interface_facts ansible module.
+
+ Args:
+ module_args: other ansible module args passed from the caller
+ complex_args: other ansible keyword args
+
+ Returns:
+ For a single ASIC platform, the namespace = DEFAULT_NAMESPACE, will retrieve interface facts for the global namespace
+            In case of multi-asic, if a namespace is specified, will retrieve interface facts for that namespace.
+ """
+ complex_args['namespace'] = self.namespace
+ return self.sonichost.interface_facts(*module_args, **complex_args)
+
+ def get_docker_name(self, service):
+ if (not self.sonichost.is_multi_asic or
+ service not in self._DEFAULT_ASIC_SERVICES
+ ):
+ return service
+
+ return self._MULTI_ASIC_DOCKER_NAME.format(service, self.asic_index)
+
+ def stop_service(self, service):
+ if not self.sonichost.is_multi_asic:
+ service_name = service
+ docker_name = service
+ else:
+ service_name = self._MULTI_ASIC_SERVICE_NAME.format(
+ service, self.asic_index
+ )
+ docker_name = self._MULTI_ASIC_DOCKER_NAME.format(
+ service, self.asic_index
+ )
+ return self.sonichost.stop_service(service_name, docker_name)
+
+ def delete_container(self, service):
+ if self.sonichost.is_multi_asic:
+ service = self._MULTI_ASIC_DOCKER_NAME.format(
+ service, self.asic_index
+ )
+ return self.sonichost.delete_container(service)
+
+ def is_container_running(self, service):
+ if self.sonichost.is_multi_asic:
+ service = self._MULTI_ASIC_DOCKER_NAME.format(
+ service, self.asic_index
+ )
+ return self.sonichost.is_container_running(service)
+
+ def is_service_running(self, service_name, docker_name):
+ if self.sonichost.is_multi_asic:
+ docker_name = self._MULTI_ASIC_DOCKER_NAME.format(
+ docker_name, self.asic_index
+ )
+ return self.sonichost.is_service_running(service_name, docker_name)
+
+ def ping_v4(self, ipv4, count=1):
+ """
+ Returns 'True' if ping to IP address works, else 'False'
+ Args:
+ IPv4 address
+
+ Returns:
+ True or False
+ """
+ try:
+ socket.inet_aton(ipv4)
+ except socket.error:
+ raise Exception("Invalid IPv4 address {}".format(ipv4))
+
+ try:
+ self.sonichost.shell("{}ping -q -c{} {} > /dev/null".format(
+ self._ns_arg, count, ipv4
+ ))
+ except RunAnsibleModuleFail:
+ return False
+ return True
+
+ def get_active_ip_interfaces(self):
+ """
+ Return a dict of active IP (Ethernet or PortChannel) interfaces, with
+ interface and peer IPv4 address.
+
+ Returns:
+ Dict of Interfaces and their IPv4 address
+ """
+ ip_ifs = self.show_ip_interface()["ansible_facts"]
+ ip_ifaces = {}
+ for k,v in ip_ifs["ip_interfaces"].items():
+ if (k.startswith("Ethernet") or
+ (k.startswith("PortChannel") and k.find("400") == -1)
+ ):
+ if (v["admin"] == "up" and v["oper_state"] == "up" and
+ self.ping_v4(v["peer_ipv4"])
+ ):
+ ip_ifaces[k] = {
+ "ipv4" : v["ipv4"],
+ "peer_ipv4" : v["peer_ipv4"]
+ }
+
+ return ip_ifaces
+
+ def bgp_drop_rule(self, ip_version, state="present"):
+ """
+ Programs iptable rule to either add or remove DROP for
+ BGP control frames
+
+ Args:
+ ip_version: IPv4 or IPv6
+ state = "present" or "absent" (add or remove)
+
+ Returns:
+ None
+ """
+ ipcmd = "iptables" if ip_version == "ipv4" else "ip6tables"
+ run_opt = "-I INPUT 1" if state == "present" else "-D INPUT"
+ check_opt = "-C INPUT"
+ cmd = (
+ "{}/sbin/{} -t filter {{}} -p tcp -j DROP --destination-port bgp"
+ ).format(self._ns_arg, ipcmd)
+
+ check_cmd = cmd.format(check_opt)
+ run_cmd = cmd.format(run_opt)
+
+ output = "Rule {} needs no action".format(run_cmd)
+ try:
+ self.sonichost.command(check_cmd)
+ if state == "absent":
+ output = self.sonichost.command(run_cmd)
+ except RunAnsibleModuleFail as e:
+ if state == "present":
+ output = self.sonichost.command(run_cmd)
+
+ logger.debug(output)
+
+ def remove_ssh_tunnel_sai_rpc(self):
+ """
+        Removes any SSH tunnels created for syncd RPC communication, if present
+
+ Returns:
+ None
+ """
+ if not self.sonichost.is_multi_asic:
+ return
+ return self.sonichost.remove_ssh_tunnel_sai_rpc()
+
+ def create_ssh_tunnel_sai_rpc(self):
+ """
+ Create ssh tunnel between host and ASIC namespace on syncd RPC
+ port. This is used to forward thrift calls to and from the syncd
+ running on this ASIC.
+
+ Returns:
+ None
+ """
+ if not self.sonichost.is_multi_asic:
+ return
+ self.remove_ssh_tunnel_sai_rpc()
+ ssh_authorize_local_user(self.sonichost)
+
+ ip_ifs = self.show_ip_interface(
+ namespace=self.namespace
+ )["ansible_facts"]
+
+ # create SSH tunnel to ASIC namespace
+ ns_docker_if_ipv4 = ip_ifs["ip_interfaces"]["eth0"]["ipv4"]
+ try:
+ socket.inet_aton(ns_docker_if_ipv4)
+ except socket.error:
+ raise Exception("Invalid V4 address {}".format(ns_docker_if_ipv4))
+
+ self.sonichost.shell(
+ ("ssh -o StrictHostKeyChecking=no -fN"
+ " -L *:9092:{}:9092 localhost"
+ ).format(ns_docker_if_ipv4)
+ )
+
+ def command(self, cmdstr):
+ """
+ Prepend 'ip netns' option for commands meant for this ASIC
+
+ Args:
+ cmdstr
+ Returns:
+ Output from the ansible command module
+ """
+ if not self.sonichost.is_multi_asic or self.namespace == DEFAULT_NAMESPACE:
+ return self.sonichost.command(cmdstr)
+
+ cmdstr = "sudo ip netns exec {} ".format(self.namespace) + cmdstr
+
+ return self.sonichost.command(cmdstr)
+
+ def run_redis_cmd(self, argv=[]):
+ """
+ Runs redis command on DUT.
+
+ Args:
+ argv (list): List of command options to run on duthost
+
+ Returns:
+ stdout (list): List of stdout lines spewed by the invoked command
+ """
+ if self.sonichost.is_multi_asic:
+ db_docker_instance = self.get_docker_name("database")
+ argv = ["docker", "exec", db_docker_instance] + argv
+
+ result = self.sonichost.shell(argv=argv)
+ pytest_assert(
+ result["rc"] == 0,
+ "Failed to run Redis command '{0}' with error '{1}'".format(
+ " ".join(map(str, argv)), result["stderr"]
+ )
+ )
+
+ return result["stdout_lines"]
diff --git a/tests/common/devices/vmhost.py b/tests/common/devices/vmhost.py
new file mode 100644
index 00000000000..680d5f52135
--- /dev/null
+++ b/tests/common/devices/vmhost.py
@@ -0,0 +1,21 @@
+from tests.common.devices.base import AnsibleHostBase
+
+
+class VMHost(AnsibleHostBase):
+ """
+ @summary: Class for VM server
+
+ For running ansible module on VM server
+ """
+
+ def __init__(self, ansible_adhoc, hostname):
+ AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
+
+ @property
+ def external_port(self):
+ if not hasattr(self, "_external_port"):
+ vm = self.host.options["variable_manager"]
+ im = self.host.options["inventory_manager"]
+ hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
+ setattr(self, "_external_port", hostvars["external_port"])
+ return getattr(self, "_external_port")
diff --git a/tests/common/dualtor/__init__.py b/tests/common/dualtor/__init__.py
index e69de29bb2d..bf9bb1d375f 100644
--- a/tests/common/dualtor/__init__.py
+++ b/tests/common/dualtor/__init__.py
@@ -0,0 +1,2 @@
+from tests.common.dualtor.dual_tor_utils import *
+from tests.common.dualtor.mux_simulator_control import *
diff --git a/tests/common/dualtor/constants.py b/tests/common/dualtor/constants.py
new file mode 100644
index 00000000000..9abf2ba8683
--- /dev/null
+++ b/tests/common/dualtor/constants.py
@@ -0,0 +1,10 @@
+
+UPPER_TOR = "upper_tor"
+LOWER_TOR = "lower_tor"
+TOGGLE = "toggle"
+RANDOM = "random"
+
+NIC = "nic"
+
+DROP = "drop"
+OUTPUT = "output"
diff --git a/tests/common/dualtor/control_plane_utils.py b/tests/common/dualtor/control_plane_utils.py
index f61db10ee4f..71ad00bbbcd 100644
--- a/tests/common/dualtor/control_plane_utils.py
+++ b/tests/common/dualtor/control_plane_utils.py
@@ -1,119 +1,122 @@
"""Contains functions used to verify control plane(APP_DB, STATE_DB) values."""
+import json
+import logging
+
+from tests.common.helpers.assertions import pytest_assert
+
+logger = logging.getLogger(__name__)
APP_DB = 0
STATE_DB = 6
+
+DB_NAME_MAP = {
+ APP_DB: "APP_DB",
+ STATE_DB: "STATE_DB"
+}
+
+DB_SEPARATOR_MAP = {
+ APP_DB: ":",
+ STATE_DB: "|"
+}
+
APP_DB_MUX_STATE_FIELDS = {
- "MUX_CABLE_TABLE": "state",
- "HW_MUX_CABLE_TABLE": "state",
- "MUX_CABLE_RESPONSE_TABLE": "response"
+ "MUX_CABLE_TABLE": "state", #
+ "HW_MUX_CABLE_TABLE": "state", #
}
+
STATE_DB_MUX_STATE_FIELDS = {
- "MUX_CABLE_TABLE": "state",
- "HW_MUX_CABLE_TABLE": "state"
+ "MUX_CABLE_TABLE": "state", #
+ "HW_MUX_CABLE_TABLE": "state", #
+ "MUX_LINKMGR_TABLE": "state" #
}
-
-def _keys(duthost, db, key_pattern):
- """Run Redis command keys over db on duthost."""
- command = "redis-cli --raw -n {db} keys '{key_pattern}'".format(db=db, key_pattern=key_pattern)
- keys_result = duthost.shell(command)
- if not keys_result["stdout"].strip():
- raise ValueError("No keys match key pattern {}".format(key_pattern))
- return [line.strip() for line in keys_result["stdout_lines"]]
+DB_CHECK_FIELD_MAP = {
+ APP_DB: APP_DB_MUX_STATE_FIELDS,
+ STATE_DB: STATE_DB_MUX_STATE_FIELDS
+}
-def _hgetall(duthost, db, key):
- """Run Redis command hgetall over db on duthost."""
- command = "redis-cli --raw -n {db} hgetall '{key}'".format(db=db, key=key)
+def _dump_db(duthost, db, key_pattern):
+ """Dump redis database matching specified key pattern"""
+ command = "redis-dump -d {db} -k \"{key_pattern}\"".format(
+ db=db, key_pattern=key_pattern)
lines = duthost.shell(command)["stdout_lines"]
- return {lines[i]: lines[i + 1] for i in range(0, len(lines), 2)}
+ return json.loads(lines[0])
-def expect_app_db_values(duthost, intf_names, state):
+def expect_db_values(duthost, db, state, health=None, intf_names='all'):
"""
- Query APP_DB on `duthost` and check if mux cable fields match the given state.
-
- The following tables/fields are checked:
+ Query db on `tor_host` and check if the mux-related fields match the
+ expected values.
- MUX_CABLE_TABLE|PORTNAME:
- - state:
-
- HW_MUX_CABLE_TABLE|PORTNAME
- - state:
-
- MUX_CABLE_RESPONSE_TABLE|PORTNAME:
- - response:
+ The tables/fields checked are defined in DB_CHECK_FIELD_MAP
Args:
- duthost: DUT host object (needs to be passed by calling function from duthosts fixture)
- intf_names: A list of the PORTNAME to check in each table
- state: The expected value for each field in each table listed above.
+ duthost: DUT host object (needs to be passed by calling function
+ from duthosts fixture)
+ db: Database number to check. Should be either 0 for APP_DB or
+ 6 for STATE_DB
+ state: The expected value for each of the `state` fields in both
+ tables
+ health: The expected value for the `state` field in the
+ MUX_LINKMGR_TABLE table (only needed for STATE_DB)
+ intf_names: A list of the PORTNAME to check in each table, or 'all'
+ (by default) to check all MUX_CABLE interfaces
- Returns:
- True if the mux cable fields match the given state.
Raises:
- ValueError if the mux cable fields don't match the given state.
+ AssertionError if the mux cable fields don't match the given states.
"""
- db = APP_DB
- mux_states = {}
- match = True
- for intf_name in intf_names:
- mux_states[intf_name] = {}
- for table, field in APP_DB_MUX_STATE_FIELDS.items():
- key = table + "|" + intf_name
- _keys(duthost, db, key)
- mux_states[intf_name][table] = _hgetall(duthost, db, key)
- if mux_states[intf_name][table][field] != state:
- match = False
-
- if not match:
- raise ValueError("Mux cable states unmatch, expect state: {state}, "
- "actual APP_DB values: {db_states}".format(state=state, db_states=mux_states))
- return match
-
-
-def expect_state_db_values(duthost, intf_names, state, health):
+ logger.info("Verifying {} values on {}: "
+ "expected state = {}, expected health = {}".format(
+ DB_NAME_MAP[db], duthost, state, health))
+ if intf_names == 'all':
+ mux_intfs = duthost.get_running_config_facts()['MUX_CABLE'].keys()
+ else:
+ mux_intfs = intf_names
+
+ mismatch_ports = {}
+ separator = DB_SEPARATOR_MAP[db]
+
+ db_check_fields = DB_CHECK_FIELD_MAP[db]
+ for table, field in db_check_fields.items():
+ key_pattern = table + separator + "*"
+ db_dump = _dump_db(duthost, db, key_pattern)
+
+ if table == 'MUX_LINKMGR_TABLE':
+ pytest_assert(
+ health is not None,
+ "Must give a value for `health` when checking STATE_DB values")
+ target_value = health
+ else:
+ target_value = state
+
+ for intf_name in mux_intfs:
+ table_key = '{}{}{}'.format(table, separator, intf_name)
+
+ if db_dump[table_key]['value'][field] != target_value:
+ mismatch_ports[table_key] = db_dump[table_key]['value']
+
+ pytest_assert(not bool(mismatch_ports),
+ "Database states don't match expected state {state},"
+ "incorrect {db_name} values {db_states}"
+ .format(state=state, db_name=DB_NAME_MAP[db],
+ db_states=json.dumps(mismatch_ports,
+ indent=4,
+ sort_keys=True)))
+
+
+def verify_tor_states(expected_active_host, expected_standby_host,
+ expected_standby_health='healthy', intf_names='all'):
"""
- Query STATE_DB on `tor_host` and check if mux cable fields match the given states.
-
- The following tables/fields are checked:
-
- MUX_CABLE_TABLE|PORTNAME:
- - state:
- - health:
-
- HW_MUX_CABLE_TABLE|PORTNAME:
- - state:
-
- Args:
- duthost: DUT host object (needs to be passed by calling function from duthosts fixture)
- intf_names: A list of the PORTNAME to check in each table
- state: The expected value for each of the `state` fields in both tables
- health: The expected value for the `health` field in the MUX_CABLE_TABLE table
-
- Returns:
- True if actual values match expected.
- Raises:
- ValueError if th mux cable fields don't match the given states.
+ Verifies that the expected states for active and standby ToRs are
+ reflected in APP_DB and STATE_DB on each device
"""
- db = STATE_DB
- mux_states = {}
- match = True
- for intf_name in intf_names:
- mux_states[intf_name] = {}
- for table, field in STATE_DB_MUX_STATE_FIELDS.items():
- key = table + "|" + intf_name
- _keys(duthost, db, key)
- mux_states[intf_name][table] = _hgetall(duthost, db, key)
-
- if mux_states[intf_name][table][field] != state:
- match = False
-
- if mux_states[intf_name]["MUX_CABLE_TABLE" + "|" + intf_name].get("health") != health:
- match = False
-
- if not match:
- raise ValueError("Mux cable states unmatch, expect state: {state}, "
- "expect health: {health}, actual STATE_DB values: {db_states}".format(
- state=state, db_states=mux_states, health=health))
- return match
+
+ expect_db_values(expected_active_host, APP_DB,
+ 'active', intf_names=intf_names)
+ expect_db_values(expected_active_host, STATE_DB, 'active',
+ 'healthy', intf_names=intf_names)
+ expect_db_values(expected_standby_host, APP_DB,
+ 'standby', intf_names=intf_names)
+ expect_db_values(expected_standby_host, STATE_DB, 'standby',
+ expected_standby_health, intf_names=intf_names)
diff --git a/tests/common/dualtor/data_plane_utils.py b/tests/common/dualtor/data_plane_utils.py
new file mode 100644
index 00000000000..a2d6ee8500f
--- /dev/null
+++ b/tests/common/dualtor/data_plane_utils.py
@@ -0,0 +1,230 @@
+import pytest
+import json
+from tests.common.dualtor.dual_tor_io import DualTorIO
+from tests.common.helpers.assertions import pytest_assert
+import threading
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+def get_standbyhost(duthosts, activehost):
+ if duthosts[0] == activehost:
+ return duthosts[1]
+ else:
+ return duthosts[0]
+
+
+def arp_setup(ptfhost):
+ logger.info('Copy ARP responder to the PTF container {}'\
+ .format(ptfhost.hostname))
+ ptfhost.copy(src='scripts/arp_responder.py', dest='/opt')
+ ptfhost.host.options["variable_manager"].extra_vars.update(
+ {"arp_responder_args": ""})
+ ptfhost.template(src="templates/arp_responder.conf.j2",
+ dest="/etc/supervisor/conf.d/arp_responder.conf")
+ logging.info("Refreshing supervisorctl")
+ ptfhost.shell("supervisorctl reread && supervisorctl update")
+
+
+def validate_no_traffic_loss(tor_IO, allowed_disruption, delay):
+ """
+ Validates traffic loss is as expected:
+
+ """
+ received_counter = tor_IO.get_total_received_packets()
+ total_disruptions = tor_IO.get_total_disruptions()
+ longest_disruption = tor_IO.get_longest_disruption()
+ total_lost_packets = tor_IO.get_total_lost_packets()
+ duplicated_packets = tor_IO.get_duplicated_packets_count()
+
+ if received_counter:
+ pytest_assert(total_disruptions <= allowed_disruption, "Traffic was "\
+ "disrupted {} times. Allowed number of disruption: {}"\
+ .format(total_disruptions, allowed_disruption))
+ pytest_assert(longest_disruption <= delay, "Traffic was disrupted for {}s. "\
+ "Maximum allowed disruption: {}s".format(longest_disruption, delay))
+ else:
+ pytest_assert(received_counter > 0, "Test failed to capture any meaningful "\
+ "received packet")
+
+ if total_lost_packets:
+ logging.warn("Packets were lost during the test. Total lost count: {}"\
+ .format(total_lost_packets))
+ pytest_assert(duplicated_packets == 0, "Duplicated packets received. "\
+ "Count: {}.".format(duplicated_packets))
+
+
+def generate_test_report(tor_IO):
+ """
+ Generates a report (dictionary) of I/O metrics that were calculated as part
+ of the dataplane test. This report is to be used by testcases to verify the
+ results as expected by test-specific scenarios
+ Returns:
+ data_plane_test_report (dict): sent/received/lost/disrupted packet counters
+ """
+ data_plane_test_report = {
+ "total_received_packets": tor_IO.get_total_received_packets(),
+ "total_sent_packets": tor_IO.get_total_sent_packets(),
+ "duplicated_packets_count": tor_IO.get_duplicated_packets_count(),
+ "disruptions": {
+ "total_disruptions": tor_IO.get_total_disruptions(),
+ "total_disrupted_packets": tor_IO.get_total_disrupted_packets(),
+ "total_disruption_time": tor_IO.get_total_disrupt_time(),
+ "longest_disruption": tor_IO.get_longest_disruption(),
+ "total_lost_packets": tor_IO.get_total_lost_packets()
+ }
+ }
+ logger.info("Data plane traffic test results: \n{}".format(json.dumps(data_plane_test_report, indent=4)))
+ return data_plane_test_report
+
+
+@pytest.fixture
+def send_t1_to_server_with_action(duthosts, ptfhost, ptfadapter, tbinfo):
+ """
+ Starts IO test from T1 router to server.
+ As part of IO test the background thread sends and sniffs packets.
+ As soon as sender and sniffer threads are in running state, a callback
+ action is performed. When action is finished, the sender and sniffer threads
+ are given time to complete. Finally, the collected packets are sniffed,
+ and the disruptions are measured.
+
+ As part of teardown, the ARP table is cleared and ptf dataplane is flushed.
+ Args:
+ ptfhost (fixture): Fixture for PTF instance to be used during the test
+ ptfadapter (fixture): Fixture to use ptf testutils
+ tbinfo (fixture): Fixture for testbed inventory information
+
+ Yields:
+ function: A helper function to run and monitor the IO test
+ """
+ arp_setup(ptfhost)
+
+ duthosts_list = []
+ def t1_to_server_io_test(activehost, tor_vlan_port=None,
+ delay=0, action=None, verify=False):
+ """
+ Helper method for `send_t1_to_server_with_action`.
+ Starts sender and sniffer before performing the action on the tor host.
+
+ Args:
+ tor_vlan_port (str): Port name (as in minigraph_portchannels) which
+ corresponds to VLAN member port of the activehost. This is used to
+ select the downstream server IP to send the packets to.
+ default - None. If set to None, the test sends traffic to randomly
+ selected downstream server addresses.
+ delay (int): Maximum acceptable delay for traffic to continue flowing again.
+ action (function): A Lambda function (with optional args) which performs
+ the desired action while the traffic is flowing from server to T1.
+ default - `None`: No action will be performed and traffic will run
+ between server to T1 router.
+ verify (boolean): If set to True, test will automatically verify packet
+ drops/duplication based on given qualification criteria
+ """
+ duthosts_list.append(activehost)
+ io_ready = threading.Event()
+ standbyhost = get_standbyhost(duthosts, activehost)
+ tor_IO = DualTorIO(activehost, standbyhost, ptfhost, ptfadapter, tbinfo,
+ io_ready, tor_vlan_port=tor_vlan_port)
+ send_and_sniff = threading.Thread(target=tor_IO.start_io_test,
+ kwargs={'traffic_generator': tor_IO.generate_from_t1_to_server})
+ send_and_sniff.start()
+ if action:
+ # do not perform the provided action until IO threads (sender and sniffer) are ready
+ io_ready.wait()
+ logger.info("Sender and sniffer threads started, ready to execute "\
+ "the callback action")
+ action()
+
+ # Wait for the IO to complete before doing checks
+ logger.info("Waiting for sender and sniffer threads to finish..")
+ send_and_sniff.join()
+ generate_test_report(tor_IO)
+ if verify:
+ allowed_disruption = 0 if delay == 0 else 1
+ validate_no_traffic_loss(tor_IO, allowed_disruption=allowed_disruption,
+ delay=delay)
+
+ yield t1_to_server_io_test
+
+ # cleanup torIO
+ ptfadapter.dataplane.flush()
+ for duthost in duthosts_list:
+ logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
+ duthost.shell('sonic-clear arp')
+
+
+@pytest.fixture
+def send_server_to_t1_with_action(duthosts, ptfhost, ptfadapter, tbinfo):
+ """
+ Starts IO test from server to T1 router.
+ As part of IO test the background thread sends and sniffs packets.
+ As soon as sender and sniffer threads are in running state, a callback
+ action is performed.
+ When action is finished, the sender and sniffer threads are given time to
+ complete. Finally, the collected packets are sniffed, and the disruptions
+ are measured.
+
+ As part of teardown, the ARP, FDB tables are cleared and ptf dataplane is flushed.
+ Args:
+ ptfhost (fixture): Fixture for PTF instance to be used during the test
+ ptfadapter (fixture): Fixture to use ptf testutils
+ tbinfo (fixture): Fixture for testbed inventory information
+
+ Yields:
+ function: A helper function to run and monitor the IO test
+ """
+ arp_setup(ptfhost)
+
+ duthosts_list = []
+ def server_to_t1_io_test(activehost, tor_vlan_port=None,
+ delay=0, action=None, verify=False):
+ """
+ Helper method for `send_server_to_t1_with_action`.
+ Starts sender and sniffer before performing the action on the tor host.
+
+ Args:
+ tor_vlan_port (str): Port name (as in minigraph_portchannels) which
+ corresponds to VLAN member port of the activehost.
+ default - None. If set to None, the test chooses random VLAN
+ member port for this test.
+ delay (int): Maximum acceptable delay for traffic to continue flowing again.
+ action (function): A Lambda function (with optional args) which
+ performs the desired action while the traffic flows from server to T1.
+ default - `None`: No action will be performed and traffic will run
+ between server to T1 router.
+ verify (boolean): If set to True, test will automatically verify packet
+ drops/duplication based on given qualification criteria
+ """
+ duthosts_list.append(activehost)
+ io_ready = threading.Event()
+ standbyhost = get_standbyhost(duthosts, activehost)
+ tor_IO = DualTorIO(activehost, standbyhost, ptfhost, ptfadapter, tbinfo,
+ io_ready, tor_vlan_port=tor_vlan_port)
+ send_and_sniff = threading.Thread(target=tor_IO.start_io_test,
+ kwargs={'traffic_generator': tor_IO.generate_from_server_to_t1})
+ send_and_sniff.start()
+
+ if action:
+ # do not perform the provided action until
+ # IO threads (sender and sniffer) are ready
+ io_ready.wait()
+ logger.info("Sender and sniffer threads started, ready to execute the "\
+ "callback action")
+ action()
+
+ # Wait for the IO to complete before doing checks
+ send_and_sniff.join()
+ generate_test_report(tor_IO)
+ if verify:
+ allowed_disruption = 0 if delay == 0 else 1
+ validate_no_traffic_loss(tor_IO, allowed_disruption=allowed_disruption,
+ delay=delay)
+
+ yield server_to_t1_io_test
+
+ # cleanup torIO
+ ptfadapter.dataplane.flush()
+ for duthost in duthosts_list:
+ logger.info('Clearing arp entries on DUT {}'.format(duthost.hostname))
+ duthost.shell('sonic-clear arp')
diff --git a/tests/common/dualtor/dual_tor_io.py b/tests/common/dualtor/dual_tor_io.py
new file mode 100644
index 00000000000..9b0f7427138
--- /dev/null
+++ b/tests/common/dualtor/dual_tor_io.py
@@ -0,0 +1,582 @@
+import datetime
+import threading
+import time
+import socket
+import random
+import struct
+import ipaddress
+import logging
+import json
+from collections import defaultdict
+
+import scapy.all as scapyall
+import ptf.testutils as testutils
+from tests.ptf_runner import ptf_runner
+from natsort import natsorted
+
+TCP_DST_PORT = 5000
+SOCKET_RECV_BUFFER_SIZE = 10 * 1024 * 1024
+PTFRUNNER_QLEN = 1000
+VLAN_INDEX = 0
+VLAN_HOSTS = 100
+VLAN_BASE_MAC_PATTERN = "72060001{:04}"
+LAG_BASE_MAC_PATTERN = '5c010203{:04}'
+
+logger = logging.getLogger(__name__)
+
+
+class DualTorIO:
+ def __init__(self, activehost, standbyhost, ptfhost, ptfadapter, tbinfo,
+ io_ready, tor_vlan_port=None):
+ self.tor_port = None
+ self.tor_vlan_port = tor_vlan_port
+ self.duthost = activehost
+ self.ptfadapter = ptfadapter
+ self.ptfhost = ptfhost
+ self.tbinfo = tbinfo
+ self.io_ready_event = io_ready
+ self.dut_mac = self.duthost.facts["router_mac"]
+ self.active_mac = self.dut_mac
+ if standbyhost:
+ self.standby_mac = standbyhost.facts["router_mac"]
+
+ self.mux_cable_table = self.duthost.get_running_config_facts()['MUX_CABLE']
+ if tor_vlan_port:
+ if tor_vlan_port in self.mux_cable_table:
+ self.downstream_dst_ip = self.mux_cable_table[tor_vlan_port]['server_ipv4'].split("/")[0]
+ else:
+ logger.error("Port {} not found in MUX cable table".format(tor_vlan_port))
+ else:
+ self.downstream_dst_ip = None
+
+ self.time_to_listen = 180.0
+ self.sniff_time_incr = 60
+ self.send_interval = 0.0035 # Inter-packet interval
+ # How many packets to be sent by sender thread
+ self.packets_to_send = min(int(self.time_to_listen /
+ (self.send_interval + 0.0015)), 45000)
+
+ self.dataplane = self.ptfadapter.dataplane
+ self.dataplane.flush()
+ self.total_disrupt_time = None
+ self.disrupts_count = None
+ self.total_disrupt_packets = None
+ self.max_lost_id = None
+ self.max_disrupt_time = None
+ self.received_counter = int()
+ self.lost_packets = dict()
+ self.duplicated_packets_count = int()
+ self.total_lost_packets = None
+ # This list will contain all unique Payload ID, to filter out received floods.
+ self.unique_id = set()
+
+ mg_facts = self.duthost.get_extended_minigraph_facts(self.tbinfo)
+ prefix_len = mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['prefixlen'] - 3
+ test_network = ipaddress.ip_address(
+ mg_facts['minigraph_vlan_interfaces'][VLAN_INDEX]['addr']) +\
+ (1 << (32 - prefix_len))
+ self.default_ip_range = str(ipaddress.ip_interface(unicode(
+ str(test_network) + '/{0}'.format(prefix_len))).network)
+ self.src_addr, mask = self.default_ip_range.split('/')
+ self.n_hosts = 2**(32 - int(mask))
+ self.port_indices = mg_facts['minigraph_ptf_indices']
+ portchannel_info = mg_facts['minigraph_portchannels']
+ self.port_channel_ports = dict()
+ for pc in portchannel_info.values():
+ for member in pc['members']:
+ self.port_channel_ports.update({member: self.port_indices[member]})
+
+ self.server_ip_list = list()
+ self.vlan_interfaces = mg_facts["minigraph_vlan_interfaces"][VLAN_INDEX]
+ self.vlan_network = self.vlan_interfaces["subnet"]
+ self.vlan_ports = dict()
+ for ifname in mg_facts["minigraph_vlans"].values()[VLAN_INDEX]["members"]:
+ self.vlan_ports.update({ifname: self.port_indices[ifname]})
+ self.vlan_host_map = self._generate_vlan_servers()
+ self.__configure_arp_responder()
+
+ vlan_table = self.duthost.get_running_config_facts()['VLAN']
+ vlan_name = list(vlan_table.keys())[0]
+ self.vlan_mac = vlan_table[vlan_name]['mac']
+
+ logger.info("VLAN ports: {}".format(str(self.vlan_ports.keys())))
+ logger.info("PORTCHANNEL ports: {}".format(str(self.port_channel_ports.keys())))
+
+
+ def _generate_vlan_servers(self):
+ """
+ @summary: Generates physical port maps which is a set of IP address and
+ their associated MAC addresses
+ - MACs are generated sequentially as offsets from VLAN_BASE_MAC_PATTERN
+ - IP addresses are randomly selected from the given VLAN network
+ - "Hosts" (IP/MAC pairs) are distributed evenly amongst the ports in the VLAN
+ """
+ for _, config in natsorted(self.mux_cable_table.items()):
+ self.server_ip_list.append(str(config['server_ipv4'].split("/")[0]))
+ logger.info("ALL server address:\n {}".format(self.server_ip_list))
+
+ vlan_host_map = dict()
+ addr_list = list(self.server_ip_list)
+ for i, port in enumerate(sorted(self.vlan_ports.values())):
+ addr = addr_list[i]
+ vlan_host_map[port] = [str(addr)]
+
+ return vlan_host_map
+
+
+ def __configure_arp_responder(self):
+ """
+ @summary: Generate ARP responder configuration using vlan_host_map.
+ Copy this configuration to PTF and restart arp_responder
+ """
+ arp_responder_conf = {}
+ for port in self.vlan_host_map:
+ arp_responder_conf['eth{}'.format(port)] = self.vlan_host_map[port]
+ with open("/tmp/from_t1.json", "w") as fp:
+ json.dump(arp_responder_conf, fp)
+ self.ptfhost.copy(src="/tmp/from_t1.json", dest="/tmp/from_t1.json")
+ self.ptfhost.shell("supervisorctl reread && supervisorctl update")
+ self.ptfhost.shell("supervisorctl restart arp_responder")
+ logger.info("arp_responder restarted")
+
+
+ def start_io_test(self, traffic_generator=None):
+ """
+ @summary: The entry point to start the TOR dataplane I/O test.
+ Args:
+ traffic_generator (function): A callback function to decide the
+ traffic direction (T1 to server / server to T1)
+ Allowed values: self.generate_from_t1_to_server or
+ self.generate_from_server_to_t1
+ """
+ # Check in a conditional for better readability
+ if traffic_generator == self.generate_from_t1_to_server:
+ self.generate_from_t1_to_server()
+ elif traffic_generator == self.generate_from_server_to_t1:
+ self.generate_from_server_to_t1()
+ else:
+ logger.error("Traffic generator not provided or invalid")
+ return
+ # start and later join the sender and sniffer threads
+ self.send_and_sniff(sender=self.traffic_sender_thread,
+ sniffer=self.traffic_sniffer_thread)
+
+ # Sender and sniffer have finished the job. Start examining the collected flow
+ self.examine_flow()
+ if self.lost_packets:
+ self.no_routing_stop, self.no_routing_start =\
+ datetime.datetime.fromtimestamp(self.no_routing_stop),\
+ datetime.datetime.fromtimestamp(self.no_routing_start)
+ logger.error("The longest disruption lasted %.3f seconds."\
+ "%d packet(s) lost." % (self.max_disrupt_time, self.max_lost_id))
+ logger.error("Total disruptions count is %d. All disruptions lasted "\
+ "%.3f seconds. Total %d packet(s) lost" % \
+ (self.disrupts_count, self.total_disrupt_time, self.total_disrupt_packets))
+
+
+ def generate_from_t1_to_server(self):
+ """
+ @summary: Generate (not send) the packets to be sent from T1 to server
+ """
+ eth_dst = self.dut_mac
+ eth_src = self.ptfadapter.dataplane.get_mac(0, 0)
+ ip_ttl = 255
+ tcp_dport = TCP_DST_PORT
+
+ if self.tor_port:
+ from_tor_src_port = self.tor_port
+ else:
+ from_tor_src_port = random.choice(self.port_channel_ports.keys())
+
+ from_tor_src_port_index = None
+ for port_name, ptf_port_index in self.port_channel_ports.items():
+ if port_name == from_tor_src_port:
+ from_tor_src_port_index = ptf_port_index
+ break
+
+ if from_tor_src_port_index is None:
+ logger.error("Port index {} not found in the list of port channel ports {}"\
+ .format(from_tor_src_port, self.port_channel_ports.values()))
+
+ logger.info("-"*20 + "T1 to server packet" + "-"*20)
+ logger.info("Source port: {}".format(from_tor_src_port))
+ logger.info("Ethernet address: dst: {} src: {}".format(eth_dst, eth_src))
+ if self.downstream_dst_ip:
+ server_ip_list = [self.downstream_dst_ip]
+ logger.info("IP address: dst: {} src: random".format(self.downstream_dst_ip))
+ else:
+ server_ip_list = self.server_ip_list
+ logger.info("IP address: dst: random src: random")
+ logger.info("TCP port: dst: {}".format(tcp_dport))
+ logger.info("DUT mac: {}".format(self.dut_mac))
+ logger.info("VLAN mac: {}".format(self.vlan_mac))
+ logger.info("-"*50)
+
+ self.packets_list = []
+ for i in range(self.packets_to_send):
+ tcp_tx_packet = testutils.simple_tcp_packet(
+ eth_dst=eth_dst,
+ eth_src=eth_src,
+ ip_dst=random.choice(server_ip_list),
+ ip_src=self.random_host_ip(),
+ ip_ttl=ip_ttl,
+ tcp_dport=tcp_dport)
+ payload = str(i) + 'X' * 60
+ packet = scapyall.Ether(str(tcp_tx_packet))
+ packet.load = payload
+ self.packets_list.append((from_tor_src_port_index, str(packet)))
+
+ self.sent_pkt_dst_mac = self.dut_mac
+ self.received_pkt_src_mac = [self.vlan_mac]
+
+
+ def generate_from_server_to_t1(self):
+ """
+ @summary: Generate (not send) the packets to be sent from server to T1
+ """
+ eth_src = self.ptfadapter.dataplane.get_mac(0, 0)
+ if self.tor_vlan_port:
+ from_server_src_port = self.tor_vlan_port
+ else:
+ from_server_src_port = random.choice(self.vlan_ports.values())
+ self.from_server_src_addr = random.choice(
+ self.vlan_host_map[from_server_src_port])
+ self.from_server_dst_addr = self.random_host_ip()
+ tcp_dport = TCP_DST_PORT
+ tcp_tx_packet = testutils.simple_tcp_packet(
+ eth_dst=self.vlan_mac,
+ eth_src=eth_src,
+ ip_src=self.from_server_src_addr,
+ ip_dst=self.from_server_dst_addr,
+ tcp_dport=tcp_dport
+ )
+ logger.info("-"*20 + "Server to T1 packet" + "-"*20)
+ logger.info("Source port: {}".format(from_server_src_port))
+ logger.info("Ethernet address: dst: {} src: {}".format(self.vlan_mac, eth_src))
+ logger.info("IP address: dst: {} src: {}".format(self.from_server_dst_addr,
+ self.from_server_src_addr))
+ logger.info("TCP port: dst: {} src: 1234".format(tcp_dport))
+ logger.info("Active ToR MAC: {}, Standby ToR MAC: {}".format(self.active_mac,
+ self.standby_mac))
+ logger.info("VLAN MAC: {}".format(self.vlan_mac))
+ logger.info("-"*50)
+
+ self.packets_list = []
+ for i in range(self.packets_to_send):
+ payload = str(i) + 'X' * 60
+ packet = scapyall.Ether(str(tcp_tx_packet))
+ packet.load = payload
+ self.packets_list.append((from_server_src_port, str(packet)))
+
+ self.sent_pkt_dst_mac = self.vlan_mac
+ self.received_pkt_src_mac = [self.active_mac, self.standby_mac]
+
+
+ def random_host_ip(self):
+ """
+ @summary: Helper method to find a random host IP for generating a random src/dst IP address
+ Returns:
+ host_ip (str): Random IP address
+ """
+ host_number = random.randint(2, self.n_hosts - 2)
+ if host_number > (self.n_hosts - 2):
+ raise Exception("host number {} is greater than number of hosts {}\
+ in the network {}".format(
+ host_number, self.n_hosts - 2, self.default_ip_range))
+ src_addr_n = struct.unpack(">I", socket.inet_aton(self.src_addr))[0]
+ net_addr_n = src_addr_n & (2**32 - self.n_hosts)
+ host_addr_n = net_addr_n + host_number
+ host_ip = socket.inet_ntoa(struct.pack(">I", host_addr_n))
+
+ return host_ip
+
+
+ def send_and_sniff(self, sender, sniffer):
+ """
+ @summary: This method starts and joins two background threads in parallel: sender and sniffer
+ """
+ self.sender_thr = threading.Thread(target=sender)
+ self.sniff_thr = threading.Thread(target=sniffer)
+ self.sniffer_started = threading.Event()
+ self.sniff_thr.start()
+ self.sender_thr.start()
+ self.sniff_thr.join()
+ self.sender_thr.join()
+
+
+ def traffic_sender_thread(self):
+ """
+ @summary: Generalized Sender thread (to be used for traffic in both directions)
+ Waits for a signal from the `traffic_sniffer_thread` before actually starting.
+ This is to make sure that packets are not sent before they are ready to be captured.
+ """
+
+ logger.info("Sender waiting to send {} packets".format(len(self.packets_list)))
+
+ self.sniffer_started.wait(timeout=10)
+ sender_start = datetime.datetime.now()
+ logger.info("Sender started at {}".format(str(sender_start)))
+
+ # Signal data_plane_utils that sender and sniffer threads have begun
+ self.io_ready_event.set()
+
+ for entry in self.packets_list:
+ time.sleep(self.send_interval)
+ testutils.send_packet(self.ptfadapter, *entry)
+
+ logger.info("Sender has been running for {}".format(
+ str(datetime.datetime.now() - sender_start)))
+
+
+ def traffic_sniffer_thread(self):
+ """
+ @summary: Generalized sniffer thread (to be used for traffic in both directions)
+ Starts `scapy_sniff` thread, and waits for its setup before signalling the sender thread to start
+ """
+ wait = self.time_to_listen + self.sniff_time_incr
+ sniffer_start = datetime.datetime.now()
+ logger.info("Sniffer started at {}".format(str(sniffer_start)))
+ sniff_filter = "tcp and tcp dst port {} and tcp src port 1234 and not icmp".format(TCP_DST_PORT)
+
+ scapy_sniffer = threading.Thread(target=self.scapy_sniff, kwargs={'sniff_timeout': wait,
+ 'sniff_filter': sniff_filter})
+ scapy_sniffer.start()
+ time.sleep(2) # Let the scapy sniff initialize completely.
+ self.sniffer_started.set() # Unblock waiter for the send_in_background.
+ scapy_sniffer.join()
+ logger.info("Sniffer has been running for {}".format(str(datetime.datetime.now() - sniffer_start)))
+ self.sniffer_started.clear()
+
+
+ def scapy_sniff(self, sniff_timeout=180, sniff_filter=''):
+ """
+ @summary: PTF runner - runs a sniffer in PTF container.
+ Running sniffer in sonic-mgmt container has missing SOCKET problem
+ and permission issues (scapy and tcpdump require root user)
+ The remote function listens on all ports. Once found, all packets
+ are dumped to local pcap file, and all packets are saved to
+ self.all_packets as scapy type.
+
+ Args:
+ sniff_timeout (int): Duration in seconds to sniff the traffic
+ sniff_filter (str): Filter that Scapy will use to collect only relevant packets
+ """
+ capture_pcap = '/tmp/capture.pcap'
+ sniffer_log = '/tmp/dualtor-sniffer.log'
+ result = ptf_runner(
+ self.ptfhost,
+ "ptftests",
+ "dualtor_sniffer.Sniff",
+ qlen=PTFRUNNER_QLEN,
+ platform_dir="ptftests",
+ platform="remote",
+ params={
+ "sniff_timeout" : sniff_timeout,
+ "sniff_filter" : sniff_filter,
+ "capture_pcap": capture_pcap,
+ "sniffer_log": sniffer_log,
+ "port_filter_expression": 'not (arp and ether src {})\
+ and not tcp'.format(self.dut_mac)
+ },
+ log_file=sniffer_log,
+ module_ignore_errors=False
+ )
+ logger.debug("Ptf_runner result: {}".format(result))
+
+ logger.info('Fetching log files from ptf and dut hosts')
+ logs_list = [
+ {'src': sniffer_log, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False},
+ {'src': capture_pcap, 'dest': '/tmp/', 'flat': True, 'fail_on_missing': False}
+ ]
+
+ for log_item in logs_list:
+ self.ptfhost.fetch(**log_item)
+
+ self.all_packets = scapyall.rdpcap(capture_pcap)
+ logger.info("Number of all packets captured: {}".format(len(self.all_packets)))
+
+
+ def get_total_disruptions(self):
+ return self.disrupts_count
+
+
+ def get_longest_disruption(self):
+ return self.max_disrupt_time
+
+
+ def get_total_disrupted_packets(self):
+ return self.total_disrupt_packets
+
+
+ def get_total_sent_packets(self):
+ return len(self.packets_list)
+
+
+ def get_total_received_packets(self):
+ return self.received_counter
+
+
+ def get_total_lost_packets(self):
+ return self.total_lost_packets
+
+
+ def get_total_disrupt_time(self):
+ return self.total_disrupt_time
+
+
+ def get_duplicated_packets_count(self):
+ return self.duplicated_packets_count
+
+
+ def no_flood(self, packet):
+ """
+ @summary: This method filters packets which are unique (i.e. no floods).
+ """
+ if (not int(str(packet[scapyall.TCP].payload).replace('X',''))in self.unique_id)\
+ and (packet[scapyall.Ether].src in self.received_pkt_src_mac):
+ # This is a unique (no flooded) received packet.
+ self.unique_id.add(int(str(packet[scapyall.TCP].payload).replace('X','')))
+ return True
+ elif packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
+ # This is a sent packet.
+ return True
+ else:
+ return False
+
+
+ def examine_flow(self):
+ """
+ @summary: This method examines packets collected by sniffer thread
+ The method compares TCP payloads of the packets one by one (assuming all
+ payloads are consecutive integers), and the losses if found - are treated
+ as disruptions in Dataplane forwarding. All disruptions are saved to
+ self.lost_packets dictionary, in format:
+ disrupt_start_id = (missing_packets_count, disrupt_time,
+ disrupt_start_timestamp, disrupt_stop_timestamp)
+ """
+ examine_start = datetime.datetime.now()
+ logger.info("Packet flow examine started {}".format(str(examine_start)))
+
+ if not self.all_packets:
+ logger.error("self.all_packets not defined.")
+ return None
+ # Filter out packets and remove floods:
+ filtered_packets = [ pkt for pkt in self.all_packets if
+ scapyall.TCP in pkt and
+ not scapyall.ICMP in pkt and
+ pkt[scapyall.TCP].sport == 1234 and
+ pkt[scapyall.TCP].dport == TCP_DST_PORT and
+ self.check_tcp_payload(pkt) and
+ self.no_flood(pkt)
+ ]
+ logger.info("Number of filtered packets captured: {}".format(len(filtered_packets)))
+
+ # Re-arrange packets, if delayed, by Payload ID and Timestamp:
+ packets = sorted(filtered_packets, key = lambda packet: (
+ int(str(packet[scapyall.TCP].payload).replace('X','')), packet.time ))
+ self.max_disrupt, self.total_disruption = 0, 0
+
+ if not packets or len(packets) == 0:
+ logger.error("Sniffer failed to capture any traffic")
+ return
+ else:
+ logger.info("Measuring traffic disruptions..")
+ filename = '/tmp/capture_filtered.pcap'
+ scapyall.wrpcap(filename, packets)
+ logger.info("Filtered pcap dumped to {}".format(filename))
+
+ self.examine_each_packet(packets)
+
+ self.disrupts_count = len(self.lost_packets) # Total disrupt counter.
+ if self.lost_packets:
+ # Find the longest loss with the longest time:
+ _, (self.max_lost_id, self.max_disrupt_time, self.no_routing_start,
+ self.no_routing_stop) = \
+ max(self.lost_packets.items(), key = lambda item:item[1][0:2])
+ self.total_disrupt_packets = sum([item[0] for item in self.lost_packets.values()])
+ self.total_disrupt_time = sum([item[1] for item in self.lost_packets.values()])
+ elif self.total_lost_packets == 0:
+ self.max_lost_id = 0
+ self.max_disrupt_time = 0
+ self.total_disrupt_packets = 0
+ self.total_disrupt_time = 0
+ logger.info("Gaps in forwarding not found.")
+
+ logger.info("Packet flow examine finished after {}".format(
+ str(datetime.datetime.now() - examine_start)))
+ logger.info("Total number of filtered incoming packets captured {}".format(
+ self.received_counter))
+ logger.info("Number of duplicated packets received: {}".format(
+ self.duplicated_packets_count))
+ logger.info("Number of packets lost: {}".format(self.total_lost_packets))
+
+
+ def examine_each_packet(self, packets):
+ lost_packets = dict()
+ sent_packets = dict()
+ duplicated_packets_count = 0
+ prev_payload, prev_time = None, None
+ sent_payload = 0
+ disruption_start, disruption_stop = None, None
+ received_counter = 0 # Counts packets from dut.
+ for packet in packets:
+ if packet[scapyall.Ether].dst == self.sent_pkt_dst_mac:
+ # This is a sent packet - keep track of it as payload_id:timestamp.
+ sent_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
+ sent_packets[sent_payload] = packet.time
+ continue
+ if packet[scapyall.Ether].src in self.received_pkt_src_mac:
+ # This is a received packet.
+ received_time = packet.time
+ received_payload = int(str(packet[scapyall.TCP].payload).replace('X',''))
+ if received_payload == prev_payload:
+ # make account for packet duplication, and keep looking for a
+ # new and unique received packet
+ duplicated_packets_count = duplicated_packets_count + 1
+ continue
+ received_counter += 1
+ if not (received_payload and received_time):
+ # This is the first valid received packet.
+ prev_payload = received_payload
+ prev_time = received_time
+ continue
+ if received_payload - prev_payload > 1:
+ # Packets in a row are missing, a disruption.
+ lost_id = (received_payload - 1) - prev_payload # How many packets lost in a row.
+ # How long disrupt lasted.
+ disrupt = (sent_packets[received_payload] - sent_packets[prev_payload + 1])
+ # Add disruption to the lost_packets dict:
+ lost_packets[prev_payload] = (lost_id, disrupt, received_time - disrupt, received_time)
+ logger.info("Disruption between packet ID %d and %d. For %.4f " % (
+ prev_payload, received_payload, disrupt))
+ if not disruption_start:
+ disruption_start = datetime.datetime.fromtimestamp(prev_time)
+ disruption_stop = datetime.datetime.fromtimestamp(received_time)
+ prev_payload = received_payload
+ prev_time = received_time
+
+ self.total_lost_packets = len(sent_packets) - received_counter
+ self.received_counter = received_counter
+ self.lost_packets = lost_packets
+ self.duplicated_packets_count = duplicated_packets_count
+
+ if self.received_counter == 0:
+ logger.error("Sniffer failed to filter any traffic from DUT")
+ if self.lost_packets:
+ logger.info("Disruptions happen between {} and {}.".format(
+ str(disruption_start), str(disruption_stop)))
+
+
+ def check_tcp_payload(self, packet):
+ """
+ @summary: Helper method
+
+ Returns: Bool: True if a packet is not corrupted and has a valid TCP
+ sequential TCP Payload
+ """
+ try:
+ int(str(packet[scapyall.TCP].payload).replace('X','')) in range(
+ self.packets_to_send)
+ return True
+ except Exception as err:
+ return False
diff --git a/tests/common/dualtor/dual_tor_mock.py b/tests/common/dualtor/dual_tor_mock.py
new file mode 100644
index 00000000000..7d3fecaab1f
--- /dev/null
+++ b/tests/common/dualtor/dual_tor_mock.py
@@ -0,0 +1,345 @@
+import json
+import logging
+import os
+import pytest
+
+from ipaddress import ip_interface, IPv4Interface, IPv6Interface, \
+ ip_address, IPv4Address
+
+from tests.common.dualtor.dual_tor_utils import tor_mux_intfs
+
+__all__ = ['apply_active_state_to_orchagent', 'apply_dual_tor_neigh_entries', 'apply_dual_tor_peer_switch_route', 'apply_mock_dual_tor_kernel_configs',
+ 'apply_mock_dual_tor_tables', 'apply_mux_cable_table_to_dut', 'apply_peer_switch_table_to_dut', 'apply_standby_state_to_orchagent', 'apply_tunnel_table_to_dut',
+ 'mock_peer_switch_loopback_ip', 'mock_server_base_ip_addr']
+
+logger = logging.getLogger(__name__)
+
+'''
+Fixtures and helper methods to configure a single ToR testbed to mock the standby or active ToR in a dual ToR testbed
+
+Test functions wishing to apply the full mock config must use the following fixtures:
+ - apply_mock_dual_tor_tables
+ - apply_mock_dual_tor_kernel_configs
+ - apply_active_state_to_orchagent OR apply_standby_state_to_orchagent
+'''
+
+def _apply_config_to_swss(dut, swss_config_str, swss_filename='swss_config_file'):
+ '''
+ Applies a given configuration string to the SWSS container
+
+ Args:
+ dut: DUT object
+ swss_config_str: String containing the configuration to be applied
+ swss_filename: The filename to use for copying the config file around (default='swss_config_file')
+ '''
+
+ dut_filename = os.path.join('/tmp',swss_filename)
+
+ dut.shell('echo "{}" > {}'.format(swss_config_str, dut_filename))
+ dut.shell('docker cp {} swss:{}'.format(dut_filename, swss_filename))
+ dut.shell('docker exec swss sh -c "swssconfig {}"'.format(swss_filename))
+
+
+def _apply_dual_tor_state_to_orchagent(dut, state, tor_mux_intfs):
+ '''
+ Helper function to configure active/standby state in orchagent
+
+ Args:
+ dut: DUT object
+ state: either 'active' or 'standby'
+ '''
+
+ logger.info("Applying {} state to orchagent".format(state))
+
+ intf_configs = []
+
+ for intf in tor_mux_intfs:
+ '''
+ For each VLAN interface, create one configuration to be applied to orchagent
+ Each interface configuration has the following structure:
+
+ {
+ "MUX_CABLE_TABLE:": {
+ "state":
+ }
+ "OP": "SET"
+ }
+ '''
+ intf_config_dict = {}
+ state_dict = {}
+
+ state_key = '"MUX_CABLE_TABLE:{}"'.format(intf)
+ state_dict = {'"state"': '"{}"'.format(state)}
+ intf_config_dict[state_key] = state_dict
+ intf_config_dict['"OP"'] = '"SET"'
+
+ intf_configs.append(intf_config_dict)
+
+ swss_config_str = json.dumps(intf_configs, indent=4)
+ logger.debug('SWSS config string is {}'.format(swss_config_str))
+ swss_filename = '/mux{}.json'.format(state)
+ _apply_config_to_swss(dut, swss_config_str, swss_filename)
+
+ yield
+ logger.info("Removing {} state from orchagent".format(state))
+
+ for i in range(len(intf_configs)):
+ intf_configs[i]['"OP"'] = '"DEL"'
+
+ swss_config_str = json.dumps(intf_configs, indent=4)
+ swss_filename = '/mux{}.json'.format(state)
+ _apply_config_to_swss(dut, swss_config_str, swss_filename)
+
+
+@pytest.fixture(scope='module')
+def apply_active_state_to_orchagent(rand_selected_dut, tor_mux_intfs):
+ dut = rand_selected_dut
+
+ for func in _apply_dual_tor_state_to_orchagent(dut, 'active', tor_mux_intfs):
+ yield func
+
+
+@pytest.fixture(scope='module')
+def apply_standby_state_to_orchagent(rand_selected_dut, tor_mux_intfs):
+ dut = rand_selected_dut
+
+ for func in _apply_dual_tor_state_to_orchagent(dut, 'standby', tor_mux_intfs):
+ yield func
+
+
+@pytest.fixture(scope='module')
+def mock_peer_switch_loopback_ip(rand_selected_dut):
+ '''
+ Returns the mocked peer switch loopback IP
+
+ The peer switch loopback is always the next IP address after the DUT loopback
+
+ Returns:
+ IPv4Interface object
+ '''
+
+ dut = rand_selected_dut
+ lo_facts = dut.get_running_config_facts()['LOOPBACK_INTERFACE']
+ loopback_intf = list(lo_facts.keys())[0]
+
+ peer_ipv4_loopback = None
+
+ for ip_addr_str in lo_facts[loopback_intf]:
+ ip_addr = ip_interface(ip_addr_str)
+
+ if type(ip_addr) is IPv4Interface:
+ peer_ipv4_loopback = ip_addr + 1
+
+ logger.debug("Mocked peer switch loopback is {}".format(peer_ipv4_loopback))
+ return peer_ipv4_loopback
+
+
+@pytest.fixture(scope='module')
+def mock_server_base_ip_addr(rand_selected_dut):
+ '''
+ Calculates the IP address of the first server
+
+ These base addresses are always the next IPs after the VLAN address
+
+ Returns:
+        IPv4Interface and IPv6Interface objects representing the first server addresses
+ '''
+ dut = rand_selected_dut
+ vlan_interface = dut.get_running_config_facts()['VLAN_INTERFACE']
+
+ vlan = list(vlan_interface.keys())[0]
+
+ server_ipv4_base_addr = None
+ server_ipv6_base_addr = None
+
+ for ip_addr_str in vlan_interface[vlan].keys():
+ ip_addr = ip_interface(ip_addr_str)
+
+ if type(ip_addr) is IPv4Interface:
+ server_ipv4_base_addr = ip_addr + 1
+ elif type(ip_addr) is IPv6Interface:
+ server_ipv6_base_addr = ip_addr + 1
+
+ logger.debug("Mocked server IP base addresses are: {} and {}".format(server_ipv4_base_addr, server_ipv6_base_addr))
+ return server_ipv4_base_addr, server_ipv6_base_addr
+
+
+@pytest.fixture(scope='module')
+def apply_dual_tor_neigh_entries(rand_selected_dut, ptfadapter, tbinfo, mock_server_base_ip_addr, tor_mux_intfs):
+ '''
+ Apply neighber table entries for servers
+ '''
+ logger.info("Applying dual ToR neighbor entries")
+
+ dut = rand_selected_dut
+
+ server_ipv4_base_addr, _ = mock_server_base_ip_addr
+
+ server_ip_to_mac_map = {}
+
+ dut_ptf_intf_map = dut.get_extended_minigraph_facts(tbinfo)['minigraph_ptf_indices']
+
+ for i, intf in enumerate(tor_mux_intfs):
+ # For each VLAN interface, get the corresponding PTF interface MAC
+ ptf_port_index = dut_ptf_intf_map[intf]
+ ptf_mac = ptfadapter.dataplane.ports[(0, ptf_port_index)].mac()
+ server_ip_to_mac_map[server_ipv4_base_addr.ip + i] = ptf_mac
+
+ vlan_interface = dut.get_running_config_facts()['VLAN_INTERFACE']
+ vlan = list(vlan_interface.keys())[0]
+
+ cmds = []
+ for ip, mac in server_ip_to_mac_map.items():
+ # Use `ip neigh replace` in case entries already exist for the target IP
+ # If there are no pre-existing entries, equivalent to `ip neigh add`
+ cmds.append('ip -4 neigh replace {} lladdr {} dev {}'.format(ip, mac, vlan))
+ dut.shell_cmds(cmds=cmds)
+
+ yield
+
+ logger.info("Removing dual ToR neighbor entries")
+
+ cmds = []
+ for ip in server_ip_to_mac_map.keys():
+ cmds.append('ip -4 neigh del {} dev {}'.format(ip, vlan))
+ dut.shell_cmds(cmds=cmds)
+
+
+@pytest.fixture(scope='module')
+def apply_dual_tor_peer_switch_route(rand_selected_dut, mock_peer_switch_loopback_ip):
+ '''
+ Apply the tunnel route to reach the peer switch via the T1 switches
+ '''
+ logger.info("Applying dual ToR peer switch loopback route")
+ dut = rand_selected_dut
+ bgp_neighbors = dut.bgp_facts()['ansible_facts']['bgp_neighbors'].keys()
+
+ ipv4_neighbors = []
+
+ for neighbor in bgp_neighbors:
+ neighbor_ip = ip_address(neighbor)
+
+ if type(neighbor_ip) is IPv4Address:
+ ipv4_neighbors.append(neighbor)
+
+ nexthop_str = ''
+ for neighbor in ipv4_neighbors:
+ nexthop_str += 'nexthop via {} '.format(neighbor)
+
+ # Use `ip route replace` in case a rule already exists for this IP
+ # If there are no pre-existing routes, equivalent to `ip route add`
+ dut.shell('ip route replace {} {}'.format(mock_peer_switch_loopback_ip, nexthop_str))
+
+ yield
+
+ logger.info("Removing dual ToR peer switch loopback route")
+
+ dut.shell('ip route del {}'.format(mock_peer_switch_loopback_ip))
+
+
+@pytest.fixture(scope='module')
+def apply_peer_switch_table_to_dut(rand_selected_dut, mock_peer_switch_loopback_ip):
+ '''
+ Adds the PEER_SWITCH table to config DB and the peer_switch field to the device metadata
+ Also adds the 'subtype' field in the device metadata table and sets it to 'DualToR'
+ '''
+ logger.info("Applying PEER_SWITCH table")
+ dut = rand_selected_dut
+ peer_switch_hostname = 'switch_hostname'
+ peer_switch_key = 'PEER_SWITCH|{}'.format(peer_switch_hostname)
+ device_meta_key = 'DEVICE_METADATA|localhost'
+
+ dut.shell('redis-cli -n 4 HSET "{}" "address_ipv4" "{}"'.format(peer_switch_key, mock_peer_switch_loopback_ip.ip))
+ dut.shell('redis-cli -n 4 HSET "{}" "{}" "{}"'.format(device_meta_key, 'subtype', 'dualToR'))
+ dut.shell('redis-cli -n 4 HSET "{}" "{}" "{}"'.format(device_meta_key, 'peer_switch', peer_switch_hostname))
+
+ yield
+ logger.info("Removing peer switch table")
+
+ dut.shell('redis-cli -n 4 DEL "{}"'.format(peer_switch_key))
+    dut.shell('redis-cli -n 4 HDEL "{}" "{}" "{}"'.format(device_meta_key, 'subtype', 'dualToR'))
+ dut.shell('redis-cli -n 4 HDEL "{}" "{}" "{}"'.format(device_meta_key, 'peer_switch', peer_switch_hostname))
+
+
+@pytest.fixture(scope='module')
+def apply_tunnel_table_to_dut(rand_selected_dut, mock_peer_switch_loopback_ip):
+ '''
+ Adds the TUNNEL table to config DB
+ '''
+ logger.info("Applying TUNNEL table")
+ dut = rand_selected_dut
+
+ dut_loopback = (mock_peer_switch_loopback_ip - 1).ip
+
+ tunnel_key = 'TUNNEL|MuxTunnel0'
+ tunnel_params = {
+ 'dscp_mode': 'uniform',
+ 'dst_ip': dut_loopback,
+ 'ecn_mode': 'copy_from_outer',
+ 'encap_ecn_mode': 'standard',
+ 'ttl_mode': 'pipe',
+ 'tunnel_type': 'IPINIP'
+ }
+
+ for param, value in tunnel_params.items():
+ dut.shell('redis-cli -n 4 HSET "{}" "{}" "{}"'.format(tunnel_key, param, value))
+
+ yield
+ logger.info("Removing tunnel table")
+
+ dut.shell('redis-cli -n 4 DEL "{}"'.format(tunnel_key))
+
+
+@pytest.fixture(scope='module')
+def apply_mux_cable_table_to_dut(rand_selected_dut, mock_server_base_ip_addr, tor_mux_intfs):
+ '''
+ Adds the MUX_CABLE table to config DB
+ '''
+ logger.info("Applying MUX_CABLE table")
+ dut = rand_selected_dut
+
+ server_ipv4_base_addr, server_ipv6_base_addr = mock_server_base_ip_addr
+
+ keys_inserted = []
+
+ cmds = []
+ for i, intf in enumerate(tor_mux_intfs):
+ server_ipv4 = str(server_ipv4_base_addr + i)
+ server_ipv6 = str(server_ipv6_base_addr + i)
+ key = 'MUX_CABLE|{}'.format(intf)
+ keys_inserted.append(key)
+ cmds.append('redis-cli -n 4 HSET "{}" "server_ipv4" "{}"'.format(key, server_ipv4))
+ cmds.append('redis-cli -n 4 HSET "{}" "server_ipv6" "{}"'.format(key, server_ipv6))
+ cmds.append('redis-cli -n 4 HSET "{}" "state" "auto"'.format(key))
+ dut.shell_cmds(cmds=cmds)
+
+ yield
+ logger.info("Removing mux cable table")
+
+ cmds = []
+ for key in keys_inserted:
+ cmds.append('redis-cli -n 4 DEL "{}"'.format(key))
+ dut.shell_cmds(cmds=cmds)
+
+
+@pytest.fixture(scope='module')
+def apply_mock_dual_tor_tables(request, tbinfo):
+ '''
+ Wraps all table fixtures for convenience
+ '''
+ if tbinfo["topo"]["name"] == "t0":
+ request.getfixturevalue("apply_mux_cable_table_to_dut")
+ request.getfixturevalue("apply_tunnel_table_to_dut")
+ request.getfixturevalue("apply_peer_switch_table_to_dut")
+ logger.info("Done applying database tables for dual ToR mock")
+
+
+@pytest.fixture(scope='module')
+def apply_mock_dual_tor_kernel_configs(request, tbinfo):
+ '''
+ Wraps all kernel related (routes and neighbor entries) fixtures for convenience
+ '''
+ if tbinfo["topo"]["name"] == "t0":
+ request.getfixturevalue("apply_dual_tor_peer_switch_route")
+ request.getfixturevalue("apply_dual_tor_neigh_entries")
+ logger.info("Done applying kernel configs for dual ToR mock")
diff --git a/tests/common/dualtor/dual_tor_utils.py b/tests/common/dualtor/dual_tor_utils.py
index 136f0e526bf..a3dc04e972b 100644
--- a/tests/common/dualtor/dual_tor_utils.py
+++ b/tests/common/dualtor/dual_tor_utils.py
@@ -1,29 +1,43 @@
+import contextlib
import logging
import pytest
+import random
import json
-import ptf.testutils as testutils
+from datetime import datetime
+from tests.ptf_runner import ptf_runner
-from ipaddress import ip_interface
from natsort import natsorted
from tests.common.config_reload import config_reload
from tests.common.helpers.assertions import pytest_assert
from tests.common.helpers.assertions import pytest_assert as pt_assert
from tests.common.helpers.dut_ports import encode_dut_port_name
+from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR
+
+__all__ = ['tor_mux_intf', 'tor_mux_intfs', 'ptf_server_intf', 't1_upper_tor_intfs', 't1_lower_tor_intfs', 'upper_tor_host', 'lower_tor_host', 'force_active_tor']
logger = logging.getLogger(__name__)
-UPPER_TOR = 'upper_tor'
-LOWER_TOR = 'lower_tor'
+
+def get_tor_mux_intfs(duthost):
+ return sorted(duthost.get_vlan_intfs(), key=lambda intf: int(intf.replace('Ethernet', '')))
@pytest.fixture(scope='session')
-def tor_mux_intf(duthosts):
+def tor_mux_intfs(duthosts):
'''
- Returns the server-facing interface on the ToR to be used for testing
+ Returns the server-facing interfaces on the ToR to be used for testing
'''
# The same ports on both ToRs should be connected to the same PTF port
- dut = duthosts[0]
- return sorted(dut.get_vlan_intfs(), key=lambda intf: int(intf.replace('Ethernet', '')))[0]
+ return get_tor_mux_intfs(duthosts[0])
+
+
+@pytest.fixture(scope='session')
+def tor_mux_intf(tor_mux_intfs):
+ '''
+ Returns the first server-facing interface on the ToR to be used for testing
+ '''
+ # The same ports on both ToRs should be connected to the same PTF port
+ return tor_mux_intfs[0]
@pytest.fixture(scope='session')
@@ -76,7 +90,7 @@ def lower_tor_host(duthosts):
Uses the convention that the second ToR listed in the testbed file is the lower ToR
'''
- dut = duthosts[1]
+ dut = duthosts[-1]
logger.info("Using {} as lower ToR".format(dut.hostname))
return dut
@@ -95,27 +109,57 @@ def map_hostname_to_tor_side(tbinfo, hostname):
return None
+def get_t1_ptf_pc_ports(dut, tbinfo):
+    """Gets the PTF portchannel ports connected to the T1 switches."""
+ config_facts = dut.get_running_config_facts()
+ mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+
+ pc_ports = {}
+ for pc in config_facts['PORTCHANNEL'].keys():
+ pc_ports[pc] = []
+ for intf in config_facts["PORTCHANNEL"][pc]["members"]:
+ ptf_port_index = mg_facts["minigraph_ptf_indices"][intf]
+ intf_name = "eth{}".format(ptf_port_index)
+ pc_ports[pc].append(intf_name)
+
+ return pc_ports
+
+
def get_t1_ptf_ports(dut, tbinfo):
'''
Gets the PTF ports connected to a given DUT for the first T1
'''
- config_facts = dut.get_running_config_facts()
- mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+ pc_ports = get_t1_ptf_pc_ports(dut, tbinfo)
# Always choose the first portchannel
- portchannel = sorted(config_facts['PORTCHANNEL'].keys())[0]
- dut_portchannel_members = config_facts['PORTCHANNEL'][portchannel]['members']
+ portchannel = sorted(pc_ports.keys())[0]
+ ptf_portchannel_intfs = pc_ports[portchannel]
- ptf_portchannel_intfs = []
+ logger.info("Using portchannel ports {} on PTF for DUT {}".format(ptf_portchannel_intfs, dut.hostname))
+ return ptf_portchannel_intfs
- for intf in dut_portchannel_members:
- member = mg_facts['minigraph_ptf_indices'][intf]
- intf_name = 'eth{}'.format(member)
- ptf_portchannel_intfs.append(intf_name)
- logger.info("Using portchannel ports {} on PTF for DUT {}".format(ptf_portchannel_intfs, dut.hostname))
+def get_t1_active_ptf_ports(dut, tbinfo):
+ """
+ @summary: Get ptf port indices for active PortChannels on DUT
+ @param dut: The DUT we are testing against
+ @param tbinfo: The fixture tbinfo
+ @return: A dict { "PortChannel0001": [0, 1], ...}
+ """
+ config_facts = dut.get_running_config_facts()
+ mg_facts = dut.get_extended_minigraph_facts(tbinfo)
+
+ up_portchannels = dut.get_up_ip_ports()
+ ptf_portchannel_intfs = {}
+ for k, v in config_facts['PORTCHANNEL'].items():
+ if k in up_portchannels:
+ ptf_portchannel_intfs[k] = []
+ for member in v['members']:
+ ptf_portchannel_intfs[k].append(mg_facts['minigraph_ptf_indices'][member])
+
return ptf_portchannel_intfs
+
def update_mux_configs_and_config_reload(dut, state):
"""
@summary: Update config_db.json, and then load with 'config reload'
@@ -133,7 +177,7 @@ def update_mux_configs_and_config_reload(dut, state):
# Update mux_cable state and dump to a temp file
mux_cable_config_json = json.loads(mux_cable_config)
for _, config in mux_cable_config_json.items():
- config['state'] = state
+ config['state'] = state
mux_cable_config_json = {"MUX_CABLE": mux_cable_config_json}
TMP_FILE = "/tmp/mux_config.json"
with open(TMP_FILE, "w") as f:
@@ -151,19 +195,31 @@ def update_mux_configs_and_config_reload(dut, state):
dut.file(path=TMP_FILE, state='absent')
-def force_active_tor(dut, intf):
+@pytest.fixture
+def force_active_tor():
"""
@summary: Manually set dut host to the active tor for intf
@param dut: The duthost for which to toggle mux
@param intf: One or a list of names of interface or 'all' for all interfaces
"""
- if type(intf) == str:
- cmds = ["config muxcable mode active {}".format(intf)]
- else:
- cmds = []
- for i in intf:
- cmds.append("config muxcable mode active {}".format(i))
- dut.shell_cmds(cmds=cmds)
+ forced_intfs = []
+ def force_active_tor_fn(dut, intf):
+ if type(intf) == str:
+ cmds = ["config muxcable mode active {}; true".format(intf)]
+ forced_intfs.append((dut, intf))
+ else:
+ cmds = []
+ for i in intf:
+ forced_intfs.append((dut, i))
+ cmds.append("config muxcable mode active {}; true".format(i))
+ dut.shell_cmds(cmds=cmds, continue_on_fail=True)
+
+ yield force_active_tor_fn
+
+ for x in forced_intfs:
+ x[0].shell("config muxcable mode auto {}; true".format(x[1]))
+
+
def _get_tor_fanouthosts(tor_host, fanouthosts):
"""Helper function to get the fanout host objects that the current tor_host connected to.
@@ -475,29 +531,102 @@ def shutdown(vm_names=None, upper=False, lower=False):
eos_host.no_shutdown(vm_intf)
-@pytest.fixture(scope='function', autouse=True)
-def start_linkmgrd_heartbeat(ptfadapter, duthost, tbinfo):
- '''
- Send a GARP from from PTF->ToR from each PTF port connected to a mux cable
-
- This is needed since linkmgrd will not start sending heartbeats until the PTF MAC is learned in the DUT neighbor table
- '''
- garp_pkts = {}
+def mux_cable_server_ip(dut):
+ """Function for retrieving all ip of servers connected to mux_cable
- ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
- mux_cable_table = duthost.get_running_config_facts()['MUX_CABLE']
+ Args:
+ dut: The host object
- for vlan_intf, config in mux_cable_table.items():
- ptf_port_index = ptf_indices[vlan_intf]
- server_ip = ip_interface(config['server_ipv4'])
- ptf_mac = ptfadapter.dataplane.ports[(0, ptf_port_index)].mac()
+ Returns:
+ A dict: {"Ethernet12" : {"server_ipv4":"192.168.0.4/32", "server_ipv6":"fc02:1000::4/128"}, ....}
+ """
+ mux_cable_config = dut.shell("sonic-cfggen -d --var-json 'MUX_CABLE'")['stdout']
+ return json.loads(mux_cable_config)
- garp_pkt = testutils.simple_arp_packet(eth_src=ptf_mac,
- hw_snd=ptf_mac,
- ip_snd=str(server_ip.ip),
- ip_tgt=str(server_ip.ip), # Re-use server IP as target IP, since it is within the subnet of the VLAN IP
- arp_op=2)
- garp_pkts[ptf_port_index] = garp_pkt
- for port, pkt in garp_pkts.items():
- testutils.send_packet(ptfadapter, port, pkt)
+def check_tunnel_balance(ptfhost, active_tor_mac, standby_tor_mac, vlan_mac, active_tor_ip, standby_tor_ip, targer_server_ip, target_server_port, ptf_portchannel_indices):
+ """
+ Function for testing traffic distribution among all avtive T1.
+ A test script will be running on ptf to generate traffic to standby interface, and the traffic will be forwarded to
+ active ToR. The running script will capture all traffic and verify if these packets are distributed evenly.
+ Args:
+ ptfhost: The ptf host connected to current testbed
+ active_tor_mac: MAC address of active ToR
+ standby_tor_mac: MAC address of the standby ToR
+ vlan_mac: MAC address of Vlan (For verifying packet)
+ active_tor_ip: IP Address of Loopback0 of active ToR (For verifying packet)
+ standby_tor_ip: IP Address of Loopback0 of standby ToR (For verifying packet)
+ target_server_ip: The IP address of server for testing. The mux cable connected to this server must be standby
+        target_server_port: PTF port index on which server is connected
+ ptf_portchannel_indices: A dict, the mapping from portchannel to ptf port indices
+ Returns:
+ None.
+ """
+ HASH_KEYS = ["src-port", "dst-port", "src-ip"]
+ params = {
+ "server_ip": targer_server_ip,
+ "server_port": target_server_port,
+ "active_tor_mac": active_tor_mac,
+ "standby_tor_mac": standby_tor_mac,
+ "vlan_mac": vlan_mac,
+ "active_tor_ip": active_tor_ip,
+ "standby_tor_ip": standby_tor_ip,
+ "ptf_portchannel_indices": ptf_portchannel_indices,
+ "hash_key_list": HASH_KEYS
+ }
+ logging.info("run ptf test for verifying IPinIP tunnel balance")
+ timestamp = datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
+ log_file = "/tmp/ip_in_ip_tunnel_test.{}.log".format(timestamp)
+ logging.info("PTF log file: %s" % log_file)
+ ptf_runner(ptfhost,
+ "ptftests",
+ "ip_in_ip_tunnel_test.IpinIPTunnelTest",
+ platform_dir="ptftests",
+ params=params,
+ log_file=log_file,
+ qlen=2000,
+ socket_recv_size=16384)
+
+
+def get_crm_nexthop_counter(host):
+ """
+ Get used crm nexthop counter
+ """
+ crm_facts = host.get_crm_facts()
+ return crm_facts['resources']['ipv4_nexthop']['used']
+
+
+def show_arp(duthost, neighbor_addr):
+ """Show arp table entry for neighbor."""
+ command = "/usr/sbin/arp -n %s" % neighbor_addr
+ output = duthost.shell(command)["stdout_lines"]
+ if "no entry" in output[0]:
+ return {}
+ headers = ("address", "hwtype", "hwaddress", "flags", "iface")
+ return dict(zip(headers, output[1].split()))
+
+
+@contextlib.contextmanager
+def flush_neighbor(duthost, neighbor, restore=True):
+ """Flush neighbor entry for server in duthost."""
+ neighbor_info = show_arp(duthost, neighbor)
+ logging.info("neighbor entry for %s: %s", neighbor, neighbor_info)
+ assert neighbor_info, "No neighbor info for neighbor %s" % neighbor
+ logging.info("remove neighbor entry for %s", neighbor)
+ duthost.shell("ip -4 neighbor del %s dev %s" % (neighbor, neighbor_info["iface"]))
+ try:
+ yield
+ finally:
+ if restore:
+ logging.info("restore neighbor entry for %s", neighbor)
+ duthost.shell("ip -4 neighbor replace {address} lladdr {hwaddress} dev {iface}".format(**neighbor_info))
+
+
+@pytest.fixture(scope="function")
+def rand_selected_interface(rand_selected_dut):
+ """Select a random interface to test."""
+ tor = rand_selected_dut
+ server_ips = mux_cable_server_ip(tor)
+ iface = str(random.choice(server_ips.keys()))
+ logging.info("select DUT interface %s to test.", iface)
+ return iface, server_ips[iface]
diff --git a/tests/common/dualtor/mux_simulator_control.py b/tests/common/dualtor/mux_simulator_control.py
index afe4c1f57f3..addfccfe247 100644
--- a/tests/common/dualtor/mux_simulator_control.py
+++ b/tests/common/dualtor/mux_simulator_control.py
@@ -2,24 +2,21 @@
import pytest
import json
import urllib2
-from tests.common.helpers.assertions import pytest_assert
+
from tests.common import utilities
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.dualtor.constants import UPPER_TOR, LOWER_TOR, TOGGLE, RANDOM, NIC, DROP, OUTPUT
-logger = logging.getLogger(__name__)
+__all__ = ['check_simulator_read_side', 'mux_server_url', 'url', 'recover_all_directions', 'set_drop', 'set_output', 'toggle_all_simulator_ports_to_another_side', \
+ 'toggle_all_simulator_ports_to_lower_tor', 'toggle_all_simulator_ports_to_random_side', 'toggle_all_simulator_ports_to_upper_tor', \
+ 'toggle_simulator_port_to_lower_tor', 'toggle_simulator_port_to_upper_tor']
-UPPER_TOR = "upper_tor"
-LOWER_TOR = "lower_tor"
-TOGGLE = "toggle"
-RANDOM = "random"
+logger = logging.getLogger(__name__)
TOGGLE_SIDES = [UPPER_TOR, LOWER_TOR, TOGGLE, RANDOM]
-NIC = "nic"
-DROP = "drop"
-OUTPUT = "output"
-
-@pytest.fixture(scope="session")
+@pytest.fixture(scope='session')
def mux_server_url(request, tbinfo):
"""
A session level fixture to retrieve the address of mux simulator address
@@ -32,27 +29,35 @@ def mux_server_url(request, tbinfo):
server = tbinfo['server']
vmset_name = tbinfo['group-name']
inv_files = request.config.option.ansible_inventory
- ip = utilities.get_test_server_vars(inv_files, server, 'ansible_host')
- port = utilities.get_group_visible_vars(inv_files, server, 'mux_simulator_port')
+ ip = utilities.get_test_server_vars(inv_files, server).get('ansible_host')
+ port = utilities.get_group_visible_vars(inv_files, server).get('mux_simulator_port')
return "http://{}:{}/mux/{}".format(ip, port, vmset_name)
-def _url(server_url, physical_port=None, action=None):
+@pytest.fixture(scope='module')
+def url(mux_server_url, duthost):
"""
- Helper function to build an url for given port and target
-
- Args:
- server_url: a str, the url for mux server, like http://10.0.0.64:8080/mux/vms17-8
- physical_port: physical port on switch, an integer starting from 1
- If physical_port is none, the returned url contains no '/port/action' (For polling/toggling all ports)
- action: a str, output|drop|None. If action is None, the returned url contains no '/action'
- Returns:
- The url for posting flow update request, like http://10.0.0.64:8080/mux/vms17-8[/1/drop|output]
+ A helper function is returned to make fixture accept arguments
"""
- if not physical_port:
- return server_url
- if not action:
- return server_url + "/{}".format(physical_port - 1)
- return server_url + "/{}/{}".format(physical_port - 1, action)
+ def _url(interface_name=None, action=None):
+ """
+ Helper function to build an url for given port and target
+
+ Args:
+ interface_name: a str, the name of interface
+ If interface_name is none, the returned url contains no '/port/action' (For polling/toggling all ports)
+ action: a str, output|drop|None. If action is None, the returned url contains no '/action'
+ Returns:
+ The url for posting flow update request, like http://10.0.0.64:8080/mux/vms17-8[/1/drop|output]
+ """
+ if not interface_name:
+ return mux_server_url
+ mg_facts = duthost.get_extended_minigraph_facts()
+ mbr_index = mg_facts['minigraph_ptf_indices'][interface_name]
+ if not action:
+ return mux_server_url + "/{}".format(mbr_index)
+ return mux_server_url + "/{}/{}".format(mbr_index, action)
+
+ return _url
def _get(server_url):
"""
@@ -107,106 +112,137 @@ def _post(server_url, data):
return False
return True
-def set_drop(mux_server_url, physical_port, directions):
- """
- A fixture to set drop for a certain direction on a port
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- directions: a list, may contain "upper_tor", "lower_tor", "nic"
- Returns:
- None.
- """
- server_url = _url(mux_server_url, physical_port, DROP)
- data = {"out_ports": directions}
- pytest_assert(_post(server_url, data), "Failed to set drop on {}".format(directions))
+@pytest.fixture(scope='module')
+def set_drop(url):
+ """
+ A helper function is returned to make fixture accept arguments
+ """
+ def _set_drop(interface_name, directions):
+ """
+ A fixture to set drop for a certain direction on a port
+ Args:
+ interface_name: a str, the name of interface
+ directions: a list, may contain "upper_tor", "lower_tor", "nic"
+ Returns:
+ None.
+ """
+ server_url = url(interface_name, DROP)
+ data = {"out_ports": directions}
+ pytest_assert(_post(server_url, data), "Failed to set drop on {}".format(directions))
-def set_output(mux_server_url, physical_port, directions):
- """
- Function to set output for a certain direction on a port
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- directions: a list, may contain "upper_tor", "lower_tor", "nic"
- Returns:
- None.
- """
- server_url = _url(mux_server_url, physical_port, OUTPUT)
- data = {"out_ports": directions}
- pytest_assert(_post(server_url, data), "Failed to set output on {}".format(directions))
+ return _set_drop
-def _simulator_port_toggle_to(mux_server_url, physical_port, target):
- """
- A helper function to toggle y_cable simulator ports
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- target: "upper_tor" or "lower_tor"
- """
- server_url = _url(mux_server_url, physical_port)
- data = {"active_side": target}
- pytest_assert(_post(server_url, data), "Failed to toggle to {} on port {}".format(target, physical_port))
+@pytest.fixture(scope='module')
+def set_output(url):
+ """
+ A helper function is returned to make fixture accept arguments
+ """
+ def _set_output(interface_name, directions):
+ """
+ Function to set output for a certain direction on a port
+ Args:
+ interface_name: a str, the name of interface
+ directions: a list, may contain "upper_tor", "lower_tor", "nic"
+ Returns:
+ None.
+ """
+ server_url = url(interface_name, OUTPUT)
+ data = {"out_ports": directions}
+ pytest_assert(_post(server_url, data), "Failed to set output on {}".format(directions))
-def toggle_simulator_port_to_upper_tor(mux_server_url, physical_port):
- """
- Function to toggle a given y_cable ports to upper_tor
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- """
- _simulator_port_toggle_to(mux_server_url, physical_port, UPPER_TOR)
+ return _set_output
-def toggle_simulator_port_to_lower_tor(mux_server_url, physical_port):
- """
- Function to toggle a given y_cable ports to lower_tor
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- """
- _simulator_port_toggle_to(mux_server_url, physical_port, LOWER_TOR)
+@pytest.fixture(scope='module')
+def toggle_simulator_port_to_upper_tor(url):
+ """
+ Returns _toggle_simulator_port_to_upper_tor to make fixture accept arguments
+ """
+ def _toggle_simulator_port_to_upper_tor(interface_name):
+ """
+ A helper function to toggle y_cable simulator ports
+ Args:
+ interface_name: a str, the name of interface
+ target: "upper_tor" or "lower_tor"
+ """
+ server_url = url(interface_name)
+ data = {"active_side": UPPER_TOR}
+ pytest_assert(_post(server_url, data), "Failed to toggle to upper_tor on interface {}".format(interface_name))
-def recover_all_directions(mux_server_url, physical_port):
+ return _toggle_simulator_port_to_upper_tor
+
+@pytest.fixture(scope='module')
+def toggle_simulator_port_to_lower_tor(url):
"""
- Function to recover all traffic on all directions on a certain port
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- Returns:
- None.
+ Returns _toggle_simulator_port_to_lower_tor to make fixture accept arguments
"""
- server_url = _url(mux_server_url, physical_port, OUTPUT)
- data = {"out_ports": [UPPER_TOR, LOWER_TOR, NIC]}
- pytest_assert(_post(server_url, data), "Failed to set output on all directions")
+ def _toggle_simulator_port_to_lower_tor(interface_name):
+ """
+ Function to toggle a given y_cable ports to lower_tor
+ Args:
+ interface_name: a str, the name of interface to control
+ """
+ server_url = url(interface_name)
+ data = {"active_side": LOWER_TOR}
+ pytest_assert(_post(server_url, data), "Failed to toggle to lower_tor on interface {}".format(interface_name))
-def check_simulator_read_side(mux_server_url, physical_port):
- """
- Retrieve the current active tor from y_cable simulator server.
- Args:
- mux_server_url: a str, the address of mux server
- physical_port: physical port on switch, an integer starting from 1
- Returns:
- 1 if upper_tor is active
- 2 if lower_tor is active
- -1 for exception or inconstient status
- """
- server_url = _url(mux_server_url, physical_port)
- res = _get(server_url)
- if not res:
- return -1
- active_side = res["active_side"]
- if active_side == UPPER_TOR:
- return 1
- elif active_side == LOWER_TOR:
- return 2
- else:
- return -1
+ return _toggle_simulator_port_to_lower_tor
-@pytest.fixture
-def get_active_torhost(mux_server_url, upper_tor_host, lower_tor_host):
+@pytest.fixture(scope='module')
+def recover_all_directions(url):
+ """
+    A module level fixture, will return _recover_all_directions to make fixture accept arguments
+ """
+ def _recover_all_directions(interface_name):
+ """
+ Function to recover all traffic on all directions on a certain port
+ Args:
+ interface_name: a str, the name of interface to control
+ Returns:
+ None.
+ """
+ server_url = url(interface_name, OUTPUT)
+ data = {"out_ports": [UPPER_TOR, LOWER_TOR, NIC]}
+ pytest_assert(_post(server_url, data), "Failed to set output on all directions for interface {}".format(interface_name))
+
+ return _recover_all_directions
+
+@pytest.fixture(scope='module')
+def check_simulator_read_side(url):
+ """
+    A module level fixture, will return _check_simulator_read_side
+ """
+ def _check_simulator_read_side(interface_name):
+ """
+ Retrieve the current active tor from y_cable simulator server.
+ Args:
+ interface_name: a str, the name of interface to control
+ Returns:
+ 1 if upper_tor is active
+ 2 if lower_tor is active
+ -1 for exception or inconsistent status
+ """
+ server_url = url(interface_name)
+ res = _get(server_url)
+ if not res:
+ return -1
+ active_side = res["active_side"]
+ if active_side == UPPER_TOR:
+ return 1
+ elif active_side == LOWER_TOR:
+ return 2
+ else:
+ return -1
+
+ return _check_simulator_read_side
- def get_active_torhost():
+@pytest.fixture(scope='module')
+def get_active_torhost(upper_tor_host, lower_tor_host, check_simulator_read_side):
+ """
+    A module level fixture which returns a helper function
+ """
+ def _get_active_torhost(interface_name):
active_tor_host = None
- active_side = check_simulator_read_side(mux_server_url, 1)
+ active_side = check_simulator_read_side(interface_name)
pytest_assert(active_side != -1, "Failed to retrieve the current active tor from y_cable simulator server")
if active_side == 1:
active_tor_host = upper_tor_host
@@ -214,7 +250,7 @@ def get_active_torhost():
active_tor_host = lower_tor_host
return active_tor_host
- return get_active_torhost
+ return _get_active_torhost
def _toggle_all_simulator_ports(mux_server_url, side):
pytest_assert(side in TOGGLE_SIDES, "Unsupported side '{}'".format(side))
@@ -231,21 +267,21 @@ def _toggle(side):
_toggle_all_simulator_ports(mux_server_url, side)
return _toggle
-@pytest.fixture(scope='module')
+@pytest.fixture
def toggle_all_simulator_ports_to_upper_tor(mux_server_url):
"""
A module level fixture to toggle all ports to upper_tor
"""
_toggle_all_simulator_ports(mux_server_url, UPPER_TOR)
-@pytest.fixture(scope='module')
+@pytest.fixture
def toggle_all_simulator_ports_to_lower_tor(mux_server_url):
"""
A module level fixture to toggle all ports to lower_tor
"""
_toggle_all_simulator_ports(mux_server_url, LOWER_TOR)
-@pytest.fixture(scope='module', autouse=True)
+@pytest.fixture
def toggle_all_simulator_ports_to_rand_selected_tor(mux_server_url, tbinfo, rand_one_dut_hostname):
"""
A module level fixture to toggle all ports to randomly selected tor
@@ -255,11 +291,10 @@ def toggle_all_simulator_ports_to_rand_selected_tor(mux_server_url, tbinfo, rand
data = {"active_side": UPPER_TOR}
else:
data = {"active_side": LOWER_TOR}
-
- server_url = _url(mux_server_url)
- pytest_assert(_post(server_url, data), "Failed to toggle all ports to {}".format(rand_one_dut_hostname))
-@pytest.fixture(scope='module')
+ pytest_assert(_post(mux_server_url, data), "Failed to toggle all ports to {}".format(rand_one_dut_hostname))
+
+@pytest.fixture
def toggle_all_simulator_ports_to_another_side(mux_server_url):
"""
A module level fixture to toggle all ports to another side
@@ -268,9 +303,22 @@ def toggle_all_simulator_ports_to_another_side(mux_server_url):
"""
_toggle_all_simulator_ports(mux_server_url, TOGGLE)
-@pytest.fixture(scope='module')
+@pytest.fixture
def toggle_all_simulator_ports_to_random_side(mux_server_url):
"""
A module level fixture to toggle all ports to a random side.
"""
_toggle_all_simulator_ports(mux_server_url, RANDOM)
+
+@pytest.fixture
+def simulator_server_down(set_drop, set_output):
+ """
+ A fixture to set drop on a given mux cable
+ """
+ tmp_list = []
+ def _drop_helper(interface_name):
+ tmp_list.append(interface_name)
+ set_drop(interface_name, [UPPER_TOR, LOWER_TOR])
+
+ yield _drop_helper
+ set_output(tmp_list[0], [UPPER_TOR, LOWER_TOR])
diff --git a/tests/common/dualtor/server_traffic_utils.py b/tests/common/dualtor/server_traffic_utils.py
new file mode 100644
index 00000000000..52e39417783
--- /dev/null
+++ b/tests/common/dualtor/server_traffic_utils.py
@@ -0,0 +1,115 @@
+"""Utils to verify traffic between ToR and server."""
+import contextlib
+import logging
+import tempfile
+import sys
+import time
+
+from io import BytesIO
+from ptf.dataplane import match_exp_pkt
+from scapy.all import sniff
+from scapy.packet import ls
+
+
+@contextlib.contextmanager
+def dump_intf_packets(ansible_host, iface, pcap_save_path, dumped_packets,
+ pcap_filter=None, cleanup_pcap=True):
+ """
+ @summary: Dump packets of the interface and save to a file.
+
+ @ansible_host: the ansible host object.
+ @iface: interface to be sniffed on.
+ @pcap_save_path: packet capture file save path.
+ @dumped_packets: a list to store the dumped packets.
+ @pcap_filter: pcap filter used by tcpdump.
+ @cleanup_pcap: True to remove packet capture file.
+ """
+
+ start_pcap = "tcpdump --immediate-mode -i %s -w %s" % (iface, pcap_save_path)
+ if pcap_filter:
+ start_pcap += (" " + pcap_filter)
+ start_pcap = "nohup %s > /dev/null 2>&1 & echo $!" % start_pcap
+ pid = ansible_host.shell(start_pcap)["stdout"]
+ # sleep to let tcpdump starts to capture
+ time.sleep(1)
+ try:
+ yield
+ finally:
+ ansible_host.shell("kill -s 2 %s" % pid)
+ with tempfile.NamedTemporaryFile() as temp_pcap:
+ ansible_host.fetch(src=pcap_save_path, dest=temp_pcap.name, flat=True)
+ packets = sniff(offline=temp_pcap.name)
+ dumped_packets.extend(packets)
+ if cleanup_pcap:
+ ansible_host.file(path=pcap_save_path, state="absent")
+
+
+class ServerTrafficMonitor(object):
+ """Monit traffic between DUT and server."""
+
+ VLAN_INTERFACE_TEMPLATE = "{external_port}.{vlan_id}"
+
+ def __init__(self, duthost, vmhost, dut_iface, conn_graph_facts, exp_pkt, existing=True):
+ """
+ @summary: Initialize the monitor.
+
+ @duthost: duthost object.
+ @vmhost: vmhost object that represent the vm host server.
+ @dut_iface: the interface on duthost selected to be monitored.
+ @conn_graph_facts: connection graph data.
+ @exp_pkt: the expected packet to be matched with packets monitored,
+ should be a `ptf.mask.Mask` object.
+ @existing: True to expect to find a match for `exp_pkt` while False to
+ expect to not find a match for `exp_pkt`.
+ """
+ self.duthost = duthost
+ self.dut_iface = dut_iface
+ self.exp_pkt = exp_pkt
+ self.vmhost = vmhost
+ self.conn_graph_facts = conn_graph_facts
+ self.captured_packets = []
+ self.matched_packets = []
+ self.vmhost_iface = self._find_vmhost_vlan_interface()
+ self.dump_utility = dump_intf_packets(
+ vmhost,
+ self.vmhost_iface,
+ tempfile.NamedTemporaryFile().name,
+ self.captured_packets
+ )
+ self.existing = existing
+
+ @staticmethod
+ def _list_layer_str(packet):
+ """Return list layer output string."""
+ _stdout, sys.stdout = sys.stdout, BytesIO()
+ try:
+ ls(packet)
+ return sys.stdout.getvalue()
+ finally:
+ sys.stdout = _stdout
+
+ def _find_vmhost_vlan_interface(self):
+ """Find the vmhost vlan interface that will be sniffed on."""
+ device_port_vlans = self.conn_graph_facts["device_port_vlans"][self.duthost.hostname]
+ vlan_id = device_port_vlans[self.dut_iface]["vlanlist"][0]
+ return self.VLAN_INTERFACE_TEMPLATE.format(external_port=self.vmhost.external_port, vlan_id=vlan_id)
+
+ def __enter__(self):
+ self.captured_packets[:] = []
+ self.matched_packets[:] = []
+ self.dump_utility.__enter__()
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self.dump_utility.__exit__(exc_type, exc_value, traceback)
+ logging.info("the expected packet:\n%s", str(self.exp_pkt))
+ self.matched_packets = [p for p in self.captured_packets if match_exp_pkt(self.exp_pkt, p)]
+ logging.info("received %d matched packets", len(self.matched_packets))
+ if self.matched_packets:
+ logging.info(
+ "display the most recent matched captured packet:\n%s",
+ self._list_layer_str(self.matched_packets[-1])
+ )
+ if self.existing and not self.matched_packets:
+ raise ValueError("Failed to find expected packet.")
+ if not self.existing and self.matched_packets:
+ raise ValueError("Found expected packet.")
diff --git a/tests/common/dualtor/tunnel_traffic_utils.py b/tests/common/dualtor/tunnel_traffic_utils.py
new file mode 100644
index 00000000000..97e192a9620
--- /dev/null
+++ b/tests/common/dualtor/tunnel_traffic_utils.py
@@ -0,0 +1,161 @@
+"""Tunnel traffic verification utilities."""
+import ipaddress
+import logging
+import operator
+import pytest
+import sys
+
+from io import BytesIO
+from ptf import mask, testutils
+from scapy.all import IP, Ether
+from tests.common.dualtor import dual_tor_utils
+
+
+@pytest.fixture(scope="function")
+def tunnel_traffic_monitor(ptfadapter, tbinfo):
+ """Return TunnelTrafficMonitor to verify inter-ToR tunnel traffic."""
+
+ class TunnelTrafficMonitor(object):
+ """Monit tunnel traffic from standby ToR to active ToR."""
+
+ @staticmethod
+ def _get_t1_ptf_port_indexes(dut, tbinfo):
+ """Get the port indexes of those ptf port connecting to T1 switches."""
+ pc_ports = dual_tor_utils.get_t1_ptf_pc_ports(dut, tbinfo)
+ return [int(_.strip("eth")) for _ in reduce(operator.add, pc_ports.values(), [])]
+
+ @staticmethod
+ def _find_ipv4_lo_addr(config_facts):
+ """Find the ipv4 Loopback0 address."""
+ for addr in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]:
+ if isinstance(ipaddress.ip_network(addr), ipaddress.IPv4Network):
+ return addr.split("/")[0]
+
+ @staticmethod
+ def _build_tunnel_packet(outer_src_ip, outer_dst_ip):
+ """Build the expected tunnel packet."""
+ exp_pkt = testutils.simple_ip_packet(
+ ip_src=outer_src_ip,
+ ip_dst=outer_dst_ip,
+ pktlen=20
+ )
+ exp_pkt = mask.Mask(exp_pkt)
+ exp_pkt.set_do_not_care_scapy(Ether, "dst")
+ exp_pkt.set_do_not_care_scapy(Ether, "src")
+ exp_pkt.set_do_not_care_scapy(IP, "ihl")
+ exp_pkt.set_do_not_care_scapy(IP, "tos")
+ exp_pkt.set_do_not_care_scapy(IP, "len")
+ exp_pkt.set_do_not_care_scapy(IP, "id")
+ exp_pkt.set_do_not_care_scapy(IP, "flags")
+ exp_pkt.set_do_not_care_scapy(IP, "frag")
+ exp_pkt.set_do_not_care_scapy(IP, "ttl")
+ exp_pkt.set_do_not_care_scapy(IP, "proto")
+ exp_pkt.set_do_not_care_scapy(IP, "chksum")
+ exp_pkt.set_ignore_extra_bytes()
+ return exp_pkt
+
+ @staticmethod
+ def _dump_show_str(packet):
+ """Dump packet show output to string."""
+ _stdout, sys.stdout = sys.stdout, BytesIO()
+ try:
+ packet.show()
+ return sys.stdout.getvalue()
+ finally:
+ sys.stdout = _stdout
+
+ @staticmethod
+ def _check_ttl(packet):
+ """Check ttl field in the packet."""
+ outer_ttl, inner_ttl = packet[IP].ttl, packet[IP].payload[IP].ttl
+ logging.debug("Outer packet TTL: %s, inner packet TTL: %s", outer_ttl, inner_ttl)
+ if outer_ttl != 255:
+ return "outer packet's TTL expected TTL 255, actual %s" % outer_ttl
+ return ""
+
+ @staticmethod
+ def _check_tos(packet):
+ """Check ToS field in the packet."""
+
+ def _disassemble_ip_tos(tos):
+ return tos >> 2, tos & 0x3
+
+ outer_tos, inner_tos = packet[IP].tos, packet[IP].payload[IP].tos
+ outer_dscp, outer_ecn = _disassemble_ip_tos(outer_tos)
+ inner_dscp, inner_ecn = _disassemble_ip_tos(inner_tos)
+ logging.debug("Outer packet DSCP: {0:06b}, inner packet DSCP: {1:06b}".format(outer_dscp, inner_dscp))
+ logging.debug("Outer packet ECN: {0:02b}, inner packet ECN: {0:02b}".format(outer_ecn, inner_ecn))
+ check_res = []
+            if outer_dscp != inner_dscp:
+ check_res.append("outer packet DSCP not same as inner packet DSCP")
+ if outer_ecn != inner_ecn:
+ check_res.append("outer packet ECN not same as inner packet ECN")
+ return " ,".join(check_res)
+
+ def __init__(self, standby_tor, active_tor=None, existing=True):
+ """
+ Init the tunnel traffic monitor.
+
+ @param standby_tor: standby ToR that does the encap.
+ @param active_tor: active ToR that decaps the tunnel traffic.
+ """
+ self.active_tor = active_tor
+ self.standby_tor = standby_tor
+ self.listen_ports = sorted(self._get_t1_ptf_port_indexes(standby_tor, tbinfo))
+ self.ptfadapter = ptfadapter
+
+ standby_tor_cfg_facts = self.standby_tor.config_facts(
+ host=self.standby_tor.hostname, source="persistent"
+ )["ansible_facts"]
+ self.standby_tor_lo_addr = self._find_ipv4_lo_addr(standby_tor_cfg_facts)
+ if self.active_tor:
+ active_tor_cfg_facts = self.active_tor.config_facts(
+ host=self.active_tor.hostname, source="persistent"
+ )["ansible_facts"]
+ self.active_tor_lo_addr = self._find_ipv4_lo_addr(active_tor_cfg_facts)
+ else:
+ self.active_tor_lo_addr = [
+ _["address_ipv4"] for _ in standby_tor_cfg_facts["PEER_SWITCH"].values()
+ ][0]
+
+ self.exp_pkt = self._build_tunnel_packet(self.standby_tor_lo_addr, self.active_tor_lo_addr)
+ self.rec_pkt = None
+ self.existing = existing
+
+ def __enter__(self):
+ self.ptfadapter.dataplane.flush()
+
+ def __exit__(self, *exc_info):
+ if exc_info[0]:
+ return
+ try:
+ port_index, rec_pkt = testutils.verify_packet_any_port(
+ ptfadapter,
+ self.exp_pkt,
+ ports=self.listen_ports
+ )
+ except AssertionError as detail:
+ logging.debug("Error occurred in polling for tunnel traffic", exc_info=True)
+ if "Did not receive expected packet on any of ports" in str(detail):
+ if self.existing:
+ raise detail
+ else:
+ raise detail
+ else:
+ self.rec_pkt = Ether(rec_pkt)
+ rec_port = self.listen_ports[port_index]
+ logging.debug("Receive encap packet from PTF interface %s", "eth%s" % rec_port)
+ logging.debug("Encapsulated packet:\n%s", self._dump_show_str(self.rec_pkt))
+ if not self.existing:
+ raise RuntimeError("Detected tunnel traffic from host %s." % self.standby_tor.hostname)
+ ttl_check_res = self._check_ttl(self.rec_pkt)
+ tos_check_res = self._check_tos(self.rec_pkt)
+ check_res = []
+ if ttl_check_res:
+ check_res.append(ttl_check_res)
+ if tos_check_res:
+ check_res.append(tos_check_res)
+ if check_res:
+ raise ValueError(", ".join(check_res) + ".")
+
+ return TunnelTrafficMonitor
diff --git a/tests/common/fixtures/advanced_reboot.py b/tests/common/fixtures/advanced_reboot.py
index dae77f4a90e..8d0432564f9 100644
--- a/tests/common/fixtures/advanced_reboot.py
+++ b/tests/common/fixtures/advanced_reboot.py
@@ -79,6 +79,7 @@ def __extractTestParam(self):
self.readyTimeout = self.request.config.getoption("--ready_timeout")
self.replaceFastRebootScript = self.request.config.getoption("--replace_fast_reboot_script")
self.postRebootCheckScript = self.request.config.getoption("--post_reboot_check_script")
+ self.bgpV4V6TimeDiff = self.request.config.getoption("--bgp_v4_v6_time_diff")
# Set default reboot limit if it is not given
if self.rebootLimit is None:
@@ -482,6 +483,7 @@ def __runPtfRunner(self, rebootOper=None):
"setup_fdb_before_test" : True,
"vnet" : self.vnet,
"vnet_pkts" : self.vnetPkts,
+ "bgp_v4_v6_time_diff": self.bgpV4V6TimeDiff
},
log_file=u'/tmp/advanced-reboot.ReloadTest.log',
module_ignore_errors=self.moduleIgnoreErrors
diff --git a/tests/common/fixtures/ptfhost_utils.py b/tests/common/fixtures/ptfhost_utils.py
index 068a904c5db..922ba39933f 100644
--- a/tests/common/fixtures/ptfhost_utils.py
+++ b/tests/common/fixtures/ptfhost_utils.py
@@ -1,7 +1,9 @@
+import json
import os
import pytest
import logging
+from ipaddress import ip_interface
from jinja2 import Template
from natsort import natsorted
@@ -10,6 +12,7 @@
ROOT_DIR = "/root"
OPT_DIR = "/opt"
+TMP_DIR = '/tmp'
SUPERVISOR_CONFIG_DIR = "/etc/supervisor/conf.d/"
SCRIPTS_SRC_DIR = "scripts/"
TEMPLATES_DIR = "templates/"
@@ -21,6 +24,8 @@
ICMP_RESPONDER_CONF_TEMPL = "icmp_responder.conf.j2"
CHANGE_MAC_ADDRESS_SCRIPT = "scripts/change_mac.sh"
REMOVE_IP_ADDRESS_SCRIPT = "scripts/remove_ip.sh"
+GARP_SERVICE_PY = 'garp_service.py'
+GARP_SERVICE_CONF_TEMPL = 'garp_service.conf.j2'
@pytest.fixture(scope="session", autouse=True)
@@ -173,7 +178,7 @@ def run_icmp_responder(duthost, ptfhost, tbinfo):
logger.debug("Copy icmp_responder.py to ptfhost '{0}'".format(ptfhost.hostname))
ptfhost.copy(src=os.path.join(SCRIPTS_SRC_DIR, ICMP_RESPONDER_PY), dest=OPT_DIR)
- logging.debug("Start running icmp_responder")
+ logging.info("Start running icmp_responder")
templ = Template(open(os.path.join(TEMPLATES_DIR, ICMP_RESPONDER_CONF_TEMPL)).read())
ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
vlan_intfs = duthost.get_vlan_intfs()
@@ -191,5 +196,39 @@ def run_icmp_responder(duthost, ptfhost, tbinfo):
yield
- logging.debug("Stop running icmp_responder")
+ logging.info("Stop running icmp_responder")
ptfhost.shell("supervisorctl stop icmp_responder")
+
+
+@pytest.fixture(scope='session', autouse=True)
+def run_garp_service(duthost, ptfhost, tbinfo, change_mac_addresses):
+
+ garp_config = {}
+
+ ptf_indices = duthost.get_extended_minigraph_facts(tbinfo)["minigraph_ptf_indices"]
+ mux_cable_table = duthost.get_running_config_facts()['MUX_CABLE']
+
+ logger.info("Generating GARP service config file")
+
+ for vlan_intf, config in mux_cable_table.items():
+ ptf_port_index = ptf_indices[vlan_intf]
+ server_ip = ip_interface(config['server_ipv4']).ip
+
+ garp_config[ptf_port_index] = {
+ 'target_ip': '{}'.format(server_ip)
+ }
+
+ ptfhost.copy(src=os.path.join(SCRIPTS_SRC_DIR, GARP_SERVICE_PY), dest=OPT_DIR)
+
+ with open(os.path.join(TEMPLATES_DIR, GARP_SERVICE_CONF_TEMPL)) as f:
+ template = Template(f.read())
+
+ ptfhost.copy(content=json.dumps(garp_config, indent=4, sort_keys=True), dest=os.path.join(TMP_DIR, 'garp_conf.json'))
+ ptfhost.copy(content=template.render(garp_service_args = '--interval 1'), dest=os.path.join(SUPERVISOR_CONFIG_DIR, 'garp_service.conf'))
+ logger.info("Starting GARP Service on PTF host")
+ ptfhost.shell('supervisorctl update')
+ ptfhost.shell('supervisorctl start garp_service')
+
+ yield
+
+ ptfhost.shell('supervisorctl stop garp_service')
diff --git a/tests/common/helpers/drop_counters/__init__.py b/tests/common/helpers/drop_counters/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/common/helpers/drop_counters/drop_counters.py b/tests/common/helpers/drop_counters/drop_counters.py
new file mode 100644
index 00000000000..91539b4c2db
--- /dev/null
+++ b/tests/common/helpers/drop_counters/drop_counters.py
@@ -0,0 +1,97 @@
+import logging
+import re
+import json
+import pytest
+from tests.common.utilities import wait_until
+
+logger = logging.getLogger(__name__)
+
+# CLI commands to obtain drop counters.
+NAMESPACE_PREFIX = "sudo ip netns exec {} "
+NAMESPACE_SUFFIX = "-n {} "
+GET_L2_COUNTERS = "portstat -j "
+GET_L3_COUNTERS = "intfstat -j "
+ACL_COUNTERS_UPDATE_INTERVAL = 10
+LOG_EXPECT_ACL_RULE_CREATE_RE = ".*Successfully created ACL rule.*"
+LOG_EXPECT_ACL_RULE_REMOVE_RE = ".*Successfully deleted ACL rule.*"
+LOG_EXPECT_PORT_ADMIN_DOWN_RE = ".*Configure {} admin status to down.*"
+LOG_EXPECT_PORT_ADMIN_UP_RE = ".*Port {} oper state set from down to up.*"
+RX_DRP = "RX_DRP"
+RX_ERR = "RX_ERR"
+
+COMBINED_L2L3_DROP_COUNTER = False
+COMBINED_ACL_DROP_COUNTER = False
+
+
+def get_pkt_drops(duthost, cli_cmd, asic_index):
+ """
+ @summary: Parse output of "portstat" or "intfstat" commands and convert it to the dictionary.
+    @param duthost: The DUT host object
+ @param cli_cmd: one of supported CLI commands - "portstat -j" or "intfstat -j"
+ @return: Return dictionary of parsed counters
+ """
+ # Get namespace from asic_index.
+ namespace = duthost.get_namespace_from_asic_id(asic_index)
+
+ # Frame the correct cli command
+ # the L2 commands need _SUFFIX and L3 commands need _PREFIX
+ if cli_cmd == GET_L3_COUNTERS:
+ CMD_PREFIX = NAMESPACE_PREFIX if duthost.is_multi_asic else ''
+ cli_cmd = CMD_PREFIX + cli_cmd
+ elif cli_cmd == GET_L2_COUNTERS:
+ CMD_SUFFIX = NAMESPACE_SUFFIX if duthost.is_multi_asic else ''
+ cli_cmd = cli_cmd + CMD_SUFFIX
+
+ stdout = duthost.command(cli_cmd.format(namespace))
+ stdout = stdout["stdout"]
+ match = re.search("Last cached time was.*\n", stdout)
+ if match:
+ stdout = re.sub("Last cached time was.*\n", "", stdout)
+
+ try:
+ return json.loads(stdout)
+ except Exception as err:
+ raise Exception("Failed to parse output of '{}', err={}".format(cli_cmd, str(err)))
+
+
+def ensure_no_l3_drops(duthost, asic_index, packets_count):
+ """ Verify L3 drop counters were not incremented """
+ intf_l3_counters = get_pkt_drops(duthost, GET_L3_COUNTERS, asic_index)
+ unexpected_drops = {}
+ for iface, value in intf_l3_counters.items():
+ try:
+ rx_err_value = int(value[RX_ERR])
+ except ValueError as err:
+ logger.info("Unable to verify L3 drops on iface {}, L3 counters may not be supported on this platform\n{}".format(iface, err))
+ continue
+ if rx_err_value >= packets_count:
+ unexpected_drops[iface] = rx_err_value
+ if unexpected_drops:
+ pytest.fail("L3 'RX_ERR' was incremented for the following interfaces:\n{}".format(unexpected_drops))
+
+
+def ensure_no_l2_drops(duthost, asic_index, packets_count):
+ """ Verify L2 drop counters were not incremented """
+ intf_l2_counters = get_pkt_drops(duthost, GET_L2_COUNTERS, asic_index)
+ unexpected_drops = {}
+ for iface, value in intf_l2_counters.items():
+ try:
+ rx_drp_value = int(value[RX_DRP])
+ except ValueError as err:
+ logger.warning("Unable to verify L2 drops on iface {}\n{}".format(iface, err))
+ continue
+ if rx_drp_value >= packets_count:
+ unexpected_drops[iface] = rx_drp_value
+ if unexpected_drops:
+ pytest.fail("L2 'RX_DRP' was incremented for the following interfaces:\n{}".format(unexpected_drops))
+
+
+def verify_drop_counters(duthost, asic_index, dut_iface, get_cnt_cli_cmd, column_key, packets_count):
+ """ Verify drop counter incremented on specific interface """
+ get_drops = lambda: int(get_pkt_drops(duthost, get_cnt_cli_cmd, asic_index)[dut_iface][column_key].replace(",", ""))
+ check_drops_on_dut = lambda: packets_count == get_drops()
+ if not wait_until(5, 1, check_drops_on_dut):
+ fail_msg = "'{}' drop counter was not incremented on iface {}. DUT {} == {}; Sent == {}".format(
+ column_key, dut_iface, column_key, get_drops(), packets_count
+ )
+ pytest.fail(fail_msg)
diff --git a/tests/common/helpers/dut_utils.py b/tests/common/helpers/dut_utils.py
index ca97682505e..1bbde61e5ef 100644
--- a/tests/common/helpers/dut_utils.py
+++ b/tests/common/helpers/dut_utils.py
@@ -1,5 +1,8 @@
+import logging
+
from tests.common.utilities import get_host_visible_vars
+logger = logging.getLogger(__name__)
def is_supervisor_node(inv_files, hostname):
"""Check if the current node is a supervisor node in case of multi-DUT.
@@ -27,3 +30,72 @@ def is_frontend_node(inv_files, hostname):
node. If we add more types of nodes, then we need to exclude them from this method as well.
"""
return not is_supervisor_node(inv_files, hostname)
+
+
+def get_group_program_info(duthost, container_name, group_name):
+ """Gets program names, running status and their pids by analyzing the command
+ output of "docker exec supervisorctl status". Program name
+ at here represents a program which is part of group
+
+ Args:
+        duthost: The DUT host object.
+ container_name: A string shows container name.
+        group_name: A string shows group name.
+
+ Returns:
+ A dictionary where keys are the program names and values are their running
+ status and pids.
+ """
+ group_program_info = defaultdict(list)
+ program_name = None
+ program_status = None
+ program_pid = None
+
+ program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
+ for program_info in program_list["stdout_lines"]:
+ if program_info.find(group_name) != -1:
+ program_name = program_info.split()[0].split(':')[1].strip()
+ program_status = program_info.split()[1].strip()
+ if program_status in ["EXITED", "STOPPED", "STARTING"]:
+ program_pid = -1
+ else:
+ program_pid = int(program_info.split()[3].strip(','))
+
+ group_program_info[program_name].append(program_status)
+ group_program_info[program_name].append(program_pid)
+
+ if program_pid != -1:
+ logger.info("Found program '{}' in the '{}' state with pid {}"
+ .format(program_name, program_status, program_pid))
+
+ return group_program_info
+
+
+def get_program_info(duthost, container_name, program_name):
+ """Gets program running status and its pid by analyzing the command
+ output of "docker exec supervisorctl status"
+
+ Args:
+        duthost: The DUT host object.
+ container_name: A string shows container name.
+ program_name: A string shows process name.
+
+ Return:
+ Program running status and its pid.
+ """
+ program_status = None
+ program_pid = -1
+
+ program_list = duthost.shell("docker exec {} supervisorctl status".format(container_name), module_ignore_errors=True)
+ for program_info in program_list["stdout_lines"]:
+ if program_info.find(program_name) != -1:
+ program_status = program_info.split()[1].strip()
+ if program_status == "RUNNING":
+ program_pid = int(program_info.split()[3].strip(','))
+ break
+
+ if program_pid != -1:
+ logger.info("Found program '{}' in the '{}' state with pid {}"
+ .format(program_name, program_status, program_pid))
+
+ return program_status, program_pid
diff --git a/tests/common/helpers/generators.py b/tests/common/helpers/generators.py
index b344a6a8674..fa820eaf427 100755
--- a/tests/common/helpers/generators.py
+++ b/tests/common/helpers/generators.py
@@ -1,4 +1,7 @@
from netaddr import IPNetwork
+import json
+
+ZERO_ADDR = r'0.0.0.0/0'
def generate_ips(num, prefix, exclude_ips):
""" Generate random ips within prefix """
@@ -18,3 +21,34 @@ def generate_ips(num, prefix, exclude_ips):
break
return generated_ips
+
+
+def route_through_default_routes(host, ip_addr):
+ """
+ @summary: Check if a given ip targets to default route
+ @param host: The duthost
+ @param ip_addr: The ip address to check
+    @return: True if the given ip goes to default route, False otherwise
+ """
+ output = host.shell("show ip route {} json".format(ip_addr))['stdout']
+ routes_info = json.loads(output)
+ ret = True
+
+ for prefix in routes_info.keys():
+ if prefix != ZERO_ADDR:
+ ret = False
+ break
+ return ret
+
+
+def generate_ip_through_default_route(host):
+ """
+ @summary: Generate a random IP address routed through default routes
+ @param host: The duthost
+    @return: A str, or None if no ip is found in given range
+ """
+ for leading in range(11, 255):
+ ip_addr = generate_ips(1, "{}.0.0.1/24".format(leading), [])[0]
+ if route_through_default_routes(host, ip_addr):
+ return ip_addr
+ return None
diff --git a/tests/common/helpers/redis.py b/tests/common/helpers/redis.py
new file mode 100644
index 00000000000..9ce7f4505fb
--- /dev/null
+++ b/tests/common/helpers/redis.py
@@ -0,0 +1,392 @@
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class RedisCli(object):
+ """Base class for interface to RedisDb using redis-cli command.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+ database: Redis database number.
+ pid: Port number of redis db.
+ """
+
+ def __init__(self, host, database=1, pid=6379):
+ """Initializes base class with defaults"""
+ self.host = host
+ self.database = database
+ self.pid = pid
+
+ def _cli_prefix(self):
+ """Builds opening of redis CLI command for other methods."""
+ # return "docker exec -i {docker} redis-cli -p {pid} --raw -n {db} ".format(
+ # docker=self.docker, db=self.database, pid=self.pid)
+ return " -p {pid} --raw -n {db} ".format(db=self.database, pid=self.pid)
+
+ def _run_and_check(self, cmd):
+ """
+ Executes a redis CLI command and checks the output for empty string.
+
+ Args:
+ cmd: Full CLI command to run.
+
+ Returns:
+ Ansible CLI output dictionary with stdout and stdout_lines keys on success.
+ Empty dictionary on error.
+
+ """
+ result = self.host.run_redis_cli_cmd(cmd)
+
+ if len(result["stdout_lines"]) == 0:
+ logger.error("No command response: %s" % cmd)
+ return {}
+
+ return result
+
+ def _run_and_raise(self, cmd):
+ """
+ Executes a redis CLI command and checks the output for empty string.
+
+ Args:
+ cmd: Full CLI command to run.
+
+ Returns:
+ Ansible CLI output dictionary with stdout and stdout_lines keys on success.
+
+ Raises:
+ Exception: If the command had no output.
+
+ """
+ result = self.host.run_redis_cli_cmd(cmd)
+
+ if len(result["stdout_lines"]) == 0:
+ logger.error("No command response: %s" % cmd)
+ raise Exception("Command: %s returned no response." % cmd)
+
+ return result
+
+ def get_key_value(self, key):
+ """
+ Executes a redis CLI get command.
+
+ Args:
+ key: full name of the key to get.
+
+ Returns:
+ The corresponding value of the key.
+
+ Raises:
+ RedisKeyNotFound: If the key has no value or is not present.
+
+ """
+ cmd = self._cli_prefix() + "get " + key
+ result = self._run_and_check(cmd)
+ if result == {}:
+ raise RedisKeyNotFound("Key: %s not found in rediscmd: %s" % (key, cmd))
+ else:
+ return result['stdout']
+
+ def hget_key_value(self, key, field):
+ """
+ Executes a redis CLI hget command.
+
+ Args:
+ key: full name of the key to get.
+ field: Name of the hash field to get.
+
+ Returns:
+ The corresponding value of the key.
+
+ Raises:
+ RedisKeyNotFound: If the key or field has no value or is not present.
+
+ """
+ cmd = self._cli_prefix() + "hget {} {}".format(key, field)
+ result = self._run_and_check(cmd)
+ if result == {}:
+ raise RedisKeyNotFound("Key: %s, field: %s not found in rediscmd: %s" % (key, field, cmd))
+ else:
+ return result['stdout']
+
+ def get_and_check_key_value(self, key, value, field=None):
+ """
+ Executes a redis CLI get or hget and validates the response against a provided field.
+
+ Args:
+ key: full name of the key to get.
+ value: expected value to test against.
+ field: Optional; Name of the hash field to use with hget.
+
+ Returns:
+ True if the validation succeeds.
+
+ Raises:
+ RedisKeyNotFound: If the key or field has no value or is not present.
+ AssertionError: If the fetched value from redis does not match the provided value.
+
+ """
+ if field is None:
+ result = self.get_key_value(key)
+ else:
+ result = self.hget_key_value(key, field)
+
+ if str(result).lower() == str(value).lower():
+ logger.info("Value {val} matches output {out}".format(val=value, out=result))
+ return True
+ else:
+ raise AssertionError("redis value error: %s != %s key was: %s" % (result, value, key))
+
+
+class AsicDbCli(RedisCli):
+ """
+ Class to interface with the ASICDB on a host.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+
+ """
+ ASIC_SWITCH_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH"
+ ASIC_SYSPORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT"
+ ASIC_PORT_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_PORT"
+ ASIC_HOSTIF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_HOSTIF"
+ ASIC_ROUTERINTF_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE"
+ ASIC_NEIGH_ENTRY_TABLE = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY"
+
+ def __init__(self, host):
+ """
+ Initializes a connection to the ASIC DB (database 1)
+ """
+ super(AsicDbCli, self).__init__(host, 1)
+ # cache this to improve speed
+ self.hostif_portidlist = []
+
+ def get_switch_key(self):
+ """Returns a list of keys in the switch table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SWITCH_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"][0]
+
+ def get_system_port_key_list(self):
+ """Returns a list of keys in the system port table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_SYSPORT_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_port_key_list(self):
+ """Returns a list of keys in the local port table"""
+ cmd = self._cli_prefix() + "KEYS %s*" % AsicDbCli.ASIC_PORT_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_hostif_list(self):
+ """Returns a list of keys in the host interface table"""
+ cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_HOSTIF_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_router_if_list(self):
+ """Returns a list of keys in the router interface table"""
+ cmd = self._cli_prefix() + "KEYS %s:*" % AsicDbCli.ASIC_ROUTERINTF_TABLE
+ return self._run_and_raise(cmd)["stdout_lines"]
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ result = self._run_and_raise(self._cli_prefix() + "KEYS %s*%s*" % (AsicDbCli.ASIC_NEIGH_ENTRY_TABLE, ipaddr))
+ neighbor_key = None
+ match_str = '"ip":"%s"' % ipaddr
+ for key in result["stdout_lines"]:
+ if match_str in key:
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+ def get_neighbor_value(self, neighbor_key, field):
+ """
+ Returns a value of a field in the neighbor table.
+
+ Note:
+ The structure of the keys in this table cause the command() method to fail, so this function uses shell() to
+ retrieve the command output.
+
+ Args:
+ neighbor_key: The full key of the neighbor table.
+ field: The field to get in the neighbor hash table.
+ """
+ cmd = ["/usr/bin/redis-cli", "-n", "1", "HGET", neighbor_key, field]
+ if self.host.namespace is not None:
+ cmd = ["sudo", "ip", "netns", "exec"] + cmd
+ result = self.host.sonichost.shell(argv=cmd)
+ logger.debug("neigh result: %s", result['stdout'])
+ return result['stdout']
+
+ def get_hostif_portid_oidlist(self, refresh=False):
+ """
+ Returns a list of portids associated with the hostif entries on the asics.
+
+ Walks through the HOSTIF table getting each port ID from the cache and returns the list. The list
+ is saved so it can be returned directly in subsequent calls.
+
+ Args:
+ refresh: Forces the redis DB to be requeried after the first time.
+
+
+ """
+ if self.hostif_portidlist != [] and refresh is False:
+ return self.hostif_portidlist
+
+ hostif_keylist = self.get_hostif_list()
+ return_list = []
+ for hostif_key in hostif_keylist:
+ hostif_portid = self.hget_key_value(hostif_key, 'SAI_HOSTIF_ATTR_OBJ_ID')
+ return_list.append(hostif_portid)
+ self.hostif_portidlist = return_list
+ return return_list
+
+ def find_hostif_by_portid(self, portid):
+ """
+ Returns an HOSTIF table key for the port specified.
+
+ Args:
+ portid: A port OID (oid:0x1000000000004)
+
+ Raises:
+ RedisKeyNotFound: If no hostif exists with the portid provided.
+ """
+ hostif_keylist = self.get_hostif_list()
+ for hostif_key in hostif_keylist:
+ hostif_portid = self.hget_key_value(hostif_key, 'SAI_HOSTIF_ATTR_OBJ_ID')
+ if hostif_portid == portid:
+ return hostif_key
+
+ raise RedisKeyNotFound("Can't find hostif in asicdb with portid: %s", portid)
+
+ def get_rif_porttype(self, portid):
+ """
+ Determines whether a specific port OID referenced in a router interface entry is a local port or a system port.
+
+ Args:
+ portid: the port oid from SAI_ROUTER_INTERFACE_ATTR_PORT_ID (oid:0x6000000000c4d)
+
+ Returns:
+ "hostif" if the port ID has a host interface
+ "sysport" if it is a system port.
+ "port" if the port ID is in local port table but has no hostif
+ "other" if it is not found in any port table
+ """
+ # could be a localport
+ if "%s:%s" % (
+ AsicDbCli.ASIC_PORT_TABLE,
+ portid) in self.get_port_key_list() and portid in self.get_hostif_portid_oidlist():
+ return "hostif"
+ # could be a system port
+ elif "%s:%s" % (AsicDbCli.ASIC_SYSPORT_TABLE, portid) in self.get_system_port_key_list():
+ return "sysport"
+ # could be something else
+ elif "%s:%s" % (AsicDbCli.ASIC_PORT_TABLE, portid) in self.get_port_key_list():
+ return "port"
+ else:
+ return "other"
+
+
+class AppDbCli(RedisCli):
+ """
+ Class to interface with the APPDB on a host.
+
+ Attributes:
+ host: a SonicHost or SonicAsic. Commands will be run on this shell.
+
+ """
+ APP_NEIGH_TABLE = "NEIGH_TABLE"
+
+ def __init__(self, host):
+ super(AppDbCli, self).__init__(host, 0)
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ result = self._run_and_raise(self._cli_prefix() + "KEYS %s:*%s*" % (AppDbCli.APP_NEIGH_TABLE, ipaddr))
+ neighbor_key = None
+ for key in result["stdout_lines"]:
+ if key.endswith(ipaddr):
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+
+class VoqDbCli(RedisCli):
+ """
+ Class to interface with the Chassis VOQ DB on a supervisor.
+
+ Attributes:
+ host: a SonicHost instance for a supervisor card. Commands will be run on this shell.
+
+ """
+
+ def __init__(self, host):
+ """Initializes the class with the database parameters and finds the IP address of the database"""
+ super(VoqDbCli, self).__init__(host, 12, 6380)
+ output = host.command("grep chassis_db_address /etc/sonic/chassisdb.conf")
+ # chassis_db_address=10.0.0.16
+ self.ip = output['stdout'].split("=")[1]
+
+ def _cli_prefix(self):
+ """Builds opening of redis CLI command for other methods."""
+ return "-h {ip} -p {pid} --raw -n {db} ".format(
+ ip=self.ip, db=self.database, pid=self.pid)
+
+ def get_neighbor_key_by_ip(self, ipaddr):
+ """Returns the key in the neighbor table that is for a specific IP neighbor
+
+ Args:
+ ipaddr: The IP address to search for in the neighbor table.
+
+ """
+ cmd = self._cli_prefix() + 'KEYS "SYSTEM_NEIGH|*%s*"' % ipaddr
+ result = self._run_and_raise(cmd)
+ neighbor_key = None
+ for key in result["stdout_lines"]:
+ if key.endswith(ipaddr):
+ neighbor_key = key
+ break
+
+ return neighbor_key
+
+ def get_router_interface_id(self, slot, asic, port):
+ """Returns the router OID stored in the router interface table entry for the provided entry.
+
+ Args:
+            slot: slot of the router interface in either numeric or text. (3 or Linecard3)
+ asic: ASIC number of the router interface in either numeric or text (0 or Asic0)
+ port: Full text of port (Ethernet17)
+
+
+ """
+ slot = str(slot)
+ if slot.isdigit():
+ slot_str = "Linecard" + slot
+ else:
+ slot_str = slot
+
+ asic = str(asic)
+ if asic.isdigit():
+ asic_str = "Asic" + asic
+ else:
+ asic_str = asic
+
+ key = "SYSTEM_INTERFACE|{}|{}|{}".format(slot_str, asic_str, port)
+ return self.hget_key_value(key, "rif_id")
+
+
+class RedisKeyNotFound(KeyError):
+ """
+ Raised when requested keys or fields are not found in the redis db.
+ """
+ pass
diff --git a/tests/common/platform/interface_utils.py b/tests/common/platform/interface_utils.py
index 061facb97d8..5e758cb9be3 100644
--- a/tests/common/platform/interface_utils.py
+++ b/tests/common/platform/interface_utils.py
@@ -39,7 +39,7 @@ def check_interface_status(dut, asic_index, interfaces, xcvr_skip_list):
@param dut: The AnsibleHost object of DUT. For interacting with DUT.
@param interfaces: List of interfaces that need to be checked.
"""
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
namespace = asichost.get_asic_namespace()
logging.info("Check interface status using cmd 'show interface'")
#TODO Remove this logic when minigraph facts supports namespace in multi_asic
diff --git a/tests/common/platform/ssh_utils.py b/tests/common/platform/ssh_utils.py
index 3e9637c9d6d..a290d7d5c33 100644
--- a/tests/common/platform/ssh_utils.py
+++ b/tests/common/platform/ssh_utils.py
@@ -43,3 +43,16 @@ def prepare_testbed_ssh_keys(duthost, ptfhost, dut_username):
chown -R {0}:{0} /home/{0}/.ssh/
'''.format(dut_username, result['public_key'])
duthost.shell(cmd)
+
+
+def ssh_authorize_local_user(duthost):
+ """
+ Generate public private key and authorize user on the host.
+ Used to ssh into localhost without password
+ """
+ logger.info("Remove old keys from DUT")
+ duthost.shell("mkdir -p /root/.ssh")
+ duthost.shell("rm -f /root/.ssh/known_hosts")
+ duthost.shell("rm -f /root/.ssh/id_rsa*")
+ duthost.shell("ssh-keygen -q -t rsa -N '' -f /root/.ssh/id_rsa")
+ duthost.shell("cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys")
diff --git a/tests/common/platform/transceiver_utils.py b/tests/common/platform/transceiver_utils.py
index 6cedabb9ba6..2d7582dbddf 100644
--- a/tests/common/platform/transceiver_utils.py
+++ b/tests/common/platform/transceiver_utils.py
@@ -42,7 +42,7 @@ def all_transceivers_detected(dut, asic_index, interfaces, xcvr_skip_list):
Check if transceiver information of all the specified interfaces have been detected.
"""
cmd = "redis-cli --raw -n 6 keys TRANSCEIVER_INFO\*"
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
docker_cmd = asichost.get_docker_cmd(cmd, "database")
db_output = dut.command(docker_cmd)["stdout_lines"]
not_detected_interfaces = [intf for intf in interfaces if "TRANSCEIVER_INFO|%s" % intf not in db_output]
@@ -60,7 +60,7 @@ def check_transceiver_basic(dut, asic_index, interfaces, xcvr_skip_list):
"""
logging.info("Check whether transceiver information of all ports are in redis")
cmd = "redis-cli -n 6 keys TRANSCEIVER_INFO*"
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
docker_cmd = asichost.get_docker_cmd(cmd, "database")
xcvr_info = dut.command(docker_cmd)
parsed_xcvr_info = parse_transceiver_info(xcvr_info["stdout_lines"])
@@ -75,7 +75,7 @@ def check_transceiver_details(dut, asic_index, interfaces, xcvr_skip_list):
@param dut: The AnsibleHost object of DUT. For interacting with DUT.
@param interfaces: List of interfaces that need to be checked.
"""
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
logging.info("Check detailed transceiver information of each connected port")
expected_fields = ["type", "hardware_rev", "serial", "manufacturer", "model"]
for intf in interfaces:
@@ -96,7 +96,7 @@ def check_transceiver_dom_sensor_basic(dut, asic_index, interfaces, xcvr_skip_li
"""
logging.info("Check whether TRANSCEIVER_DOM_SENSOR of all ports in redis")
cmd = "redis-cli -n 6 keys TRANSCEIVER_DOM_SENSOR*"
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
docker_cmd = asichost.get_docker_cmd(cmd, "database")
xcvr_dom_sensor = dut.command(docker_cmd)
parsed_xcvr_dom_sensor = parse_transceiver_dom_sensor(xcvr_dom_sensor["stdout_lines"])
@@ -112,7 +112,7 @@ def check_transceiver_dom_sensor_details(dut, asic_index, interfaces, xcvr_skip_
@param interfaces: List of interfaces that need to be checked.
"""
logging.info("Check detailed TRANSCEIVER_DOM_SENSOR information of each connected ports")
- asichost = dut.get_asic(asic_index)
+ asichost = dut.asic_instance(asic_index)
expected_fields = ["temperature", "voltage", "rx1power", "rx2power", "rx3power", "rx4power", "tx1bias",
"tx2bias", "tx3bias", "tx4bias", "tx1power", "tx2power", "tx3power", "tx4power"]
for intf in interfaces:
diff --git a/tests/common/plugins/pdu_controller/__init__.py b/tests/common/plugins/pdu_controller/__init__.py
index 6a2cc651557..35d1d846eb4 100644
--- a/tests/common/plugins/pdu_controller/__init__.py
+++ b/tests/common/plugins/pdu_controller/__init__.py
@@ -1,23 +1,14 @@
import logging
import pytest
+from pdu_manager import pdu_manager_factory
-def pdu_controller_factory(controller_ip, controller_protocol, dut_hostname, pdu):
- """
- @summary: Factory function for creating PDU controller according to different management protocol.
- @param controller_ip: IP address of the PDU controller host.
- @param controller_protocol: Management protocol supported by the PDU controller host.
- @param dut_hostname: Hostname of the DUT to be controlled by the PDU controller.
- """
- logging.info("Creating pdu controller object")
- if controller_protocol == "snmp":
- import snmp_pdu_controllers
- return snmp_pdu_controllers.get_pdu_controller(controller_ip, dut_hostname, pdu)
+logger = logging.getLogger(__name__)
@pytest.fixture(scope="module")
-def pdu_controller(duthosts, rand_one_dut_hostname, pdu):
+def pdu_controller(duthosts, rand_one_dut_hostname, conn_graph_facts, pdu):
"""
@summary: Fixture for controlling power supply to PSUs of DUT
@param duthost: Fixture duthost defined in sonic-mgmt/tests/conftest.py
@@ -25,39 +16,18 @@ def pdu_controller(duthosts, rand_one_dut_hostname, pdu):
controller_base.py.
"""
duthost = duthosts[rand_one_dut_hostname]
-
- logging.info("Creating pdu_controller fixture")
inv_mgr = duthost.host.options["inventory_manager"]
- pdu_host = inv_mgr.get_host(duthost.hostname).get_vars().get("pdu_host")
- if not pdu_host:
- logging.info("No 'pdu_host' is defined in inventory file for '%s'. Unable to create pdu_controller" %
- duthost.hostname)
- yield None
- return
-
- controller_vars = inv_mgr.get_host(pdu_host).get_vars()
-
- controller_ip = controller_vars.get("ansible_host")
- if not controller_ip:
- logging.info("No 'ansible_host' is defined in inventory file for '%s'" % pdu_host)
- logging.info("Unable to create pdu_controller for %s" % duthost.hostname)
- yield None
- return
-
- controller_protocol = controller_vars.get("protocol")
- if not controller_protocol:
- logging.info("No protocol is defined in inventory file for '%s'. Try to use default 'snmp'" % pdu_host)
- controller_protocol = "snmp"
+ pdu_host_list = inv_mgr.get_host(duthost.hostname).get_vars().get("pdu_host")
+ pdu_hosts = {}
+ for ph in pdu_host_list.split(','):
+ var_list = inv_mgr.get_host(ph).get_vars()
+ pdu_hosts[ph] = var_list
- controller = pdu_controller_factory(controller_ip, controller_protocol, duthost.hostname, pdu)
+ controller = pdu_manager_factory(duthost.hostname, pdu_hosts, conn_graph_facts, pdu)
yield controller
- logging.info("pdu_controller fixture teardown, ensure that all PDU outlets are turned on after test")
+ logger.info("pdu_controller fixture teardown, ensure that all PDU outlets are turned on after test")
if controller:
- outlet_status = controller.get_outlet_status()
- if outlet_status:
- for outlet in outlet_status:
- if not outlet["outlet_on"]:
- controller.turn_on_outlet(outlet["outlet_id"])
+ controller.turn_on_outlet()
controller.close()
diff --git a/tests/common/plugins/pdu_controller/controller_base.py b/tests/common/plugins/pdu_controller/controller_base.py
index d26b669dd16..3880d0f9f8d 100644
--- a/tests/common/plugins/pdu_controller/controller_base.py
+++ b/tests/common/plugins/pdu_controller/controller_base.py
@@ -40,14 +40,15 @@ def turn_off_outlet(self, outlet):
"""
raise NotImplementedError
- def get_outlet_status(self, outlet=None):
+ def get_outlet_status(self, outlet=None, hostname=None):
"""
@summary: Get current power status of PDU outlets
@param outlet: Optional outlet ID, it could be integer or string digit. If no outlet is specified, power status of
all PDU outlets should be returned
+ @param hostname: Optional hostname used to partial match any label
@return: Returns a list of dictionaries. For example:
- [{"outlet_id": 0, "outlet_on": True}, {"outlet_id": 1, "outlet_on": True}]
+ [{"outlet_id": "0.0.1", "outlet_on": True}, {"outlet_id": "0.0.2", "outlet_on": True}]
If getting outlet(s) status failed, an empty list should be returned.
"""
raise NotImplementedError
diff --git a/tests/common/plugins/pdu_controller/pdu_manager.py b/tests/common/plugins/pdu_controller/pdu_manager.py
new file mode 100644
index 00000000000..c919dd3aa86
--- /dev/null
+++ b/tests/common/plugins/pdu_controller/pdu_manager.py
@@ -0,0 +1,258 @@
+"""
+ PduManager is intended to solve the issue where DUT connects to
+ multiple PDU controllers.
+
+ It also intended to hide the dependency on the fake outlet_id,
+    and reference outlet by outlet dictionary directly. With this,
+ we could enable different way to identify outlet, e.g. with the
+ outlet number from graph.
+
+ It also intended to create a smooth transition from defining
+ PDU in inventory to defining PDU in connection graph. Data in
+ graph is preferred, but if graph data is missing, existing
+ inventory data will be used.
+
+ PDU manager implements the same base PDU controller APIs and
+ collect status from and distribute operations to individual PDU
+ controllers.
+"""
+
+import logging
+import copy
+from snmp_pdu_controllers import get_pdu_controller
+
+logger = logging.getLogger(__name__)
+
+
+class PduManager():
+
+ def __init__(self, dut_hostname):
+ """
+ dut_hostname is the target DUT host name. The dut
+ defines which PDU(s) and outlet(s) it connected to.
+
+ It is NOT the PDU host name. PDU host name is defined
+ either in graph or in inventory and associated with
+ the DUT.
+ """
+ self.dut_hostname = dut_hostname
+ """
+        controllers is an array of controller dictionaries with
+ following information:
+ {
+ 'psu_name' : name of the PSU on DUT,
+ 'host' : controller_IP_address,
+ 'controller' : controller instance,
+ 'outlets' : cached outlet status,
+ 'psu_peer' : psu peer information,
+ }
+ """
+ self.controllers = []
+
+ def _update_outlets(self, outlets, pdu_index):
+ for outlet in outlets:
+ outlet['pdu_index'] = pdu_index
+ outlet['pdu_name'] = self.controllers[pdu_index]['psu_peer']['peerdevice']
+
+    def add_controller(self, psu_name, psu_peer, pdu_vars):
+        """
+        Add a controller to be managed.
+        Sample psu_peer:
+        {
+            "peerdevice": "pdu-107",
+            "HwSku": "Sentry",
+            "Protocol": "snmp",
+            "ManagementIp": "10.0.0.107",
+            "Type": "Pdu",
+            "peerport": "39"
+        }
+        """
+        if 'Protocol' not in psu_peer or 'ManagementIp' not in psu_peer:
+            logger.info('psu_peer {} missing critical information'.format(psu_peer))
+            return
+
+        if psu_peer['Protocol'] != 'snmp':
+            logger.warning('Controller protocol {} is not supported'.format(psu_peer['Protocol']))
+            return
+
+        controller = None
+        pdu_ip = psu_peer['ManagementIp']
+        shared_pdu = False
+        for pdu in self.controllers:
+            if psu_name == pdu['psu_name']:  # 'in pdu' would only test dict keys
+                logger.warning('PSU {} already has a pdu definition'.format(psu_name))
+                return
+            if pdu_ip == pdu['host']:
+                shared_pdu = True  # Sharing controller with another outlet
+                controller = pdu['controller']
+
+        outlets = []
+        pdu = {
+            'psu_name': psu_name,
+            'host': pdu_ip,
+            'controller': controller,
+            'outlets': outlets,
+            'psu_peer': psu_peer,
+        }
+        next_index = len(self.controllers)
+        self.controllers.append(pdu)
+        if not shared_pdu:
+            controller = get_pdu_controller(pdu_ip, pdu_vars)
+            if not controller:
+                logger.warning('Failed creating pdu controller: {}'.format(psu_peer))
+                return
+            outlets = controller.get_outlet_status(hostname=self.dut_hostname)
+            self._update_outlets(outlets, next_index)
+            pdu['outlets'] = outlets
+        pdu['controller'] = controller
+
+ def _get_pdu_controller(self, pdu_index):
+ pdu = self.controllers[pdu_index]
+ return pdu['controller']
+
+ def turn_on_outlet(self, outlet=None):
+ """
+        Turning on an outlet. The outlet contains enough information
+        to identify the pdu controller + outlet ID.
+        When outlet is None, all outlets will be turned on.
+ """
+ if outlet is not None:
+ controller = self._get_pdu_controller(outlet['pdu_index'])
+ return controller.turn_on_outlet(outlet['outlet_id'])
+ else:
+ # turn on all outlets
+ ret = True
+ for controller in self.controllers:
+ for outlet in controller['outlets']:
+ rc = controller['controller'].turn_on_outlet(outlet['outlet_id'])
+ ret = ret and rc
+
+ return ret
+
+ def turn_off_outlet(self, outlet=None):
+ """
+        Turning off an outlet. The outlet contains enough information
+ to identify the pdu controller + outlet ID.
+ when outlet is None, all outlets will be turned off.
+ """
+ if outlet is not None:
+ controller = self._get_pdu_controller(outlet['pdu_index'])
+ return controller.turn_off_outlet(outlet['outlet_id'])
+ else:
+            # turn off all outlets
+ ret = True
+ for controller in self.controllers:
+ for outlet in controller['outlets']:
+ rc = controller['controller'].turn_off_outlet(outlet['outlet_id'])
+ ret = ret and rc
+
+ return ret
+
+ def get_outlet_status(self, outlet=None):
+ """
+ Getting outlet status. The outlet contains enough information
+ to identify the pdu controller + outlet ID.
+ when outlet is None, status of all outlets will be returned.
+ """
+ status = []
+ if outlet is not None:
+ pdu_index = outlet['pdu_index']
+ controller = self._get_pdu_controller(pdu_index)
+ outlets = controller.get_outlet_status(outlet=outlet['outlet_id'])
+ self._update_outlets(outlets, pdu_index)
+ status = status + outlets
+ else:
+ # collect all status
+ for pdu_index, controller in enumerate(self.controllers):
+ if len(controller['outlets']) > 0:
+ outlets = controller['controller'].get_outlet_status(hostname=self.dut_hostname)
+ self._update_outlets(outlets, pdu_index)
+ status = status + outlets
+
+ return status
+
+ def close(self):
+ for controller in self.controllers:
+ if len(controller['outlets']) > 0:
+ controller['controller'].close()
+
+
+def _merge_dev_link(devs, links):
+ ret = copy.deepcopy(devs)
+ for host, info in links.items():
+ if host not in ret:
+ ret[host] = {}
+
+ for key, val in info.items():
+ if key not in ret[host]:
+ ret[host][key] = {}
+ ret[host][key].update(val)
+
+ return ret
+
+
+def _build_pdu_manager_from_graph(pduman, dut_hostname, conn_graph_facts, pdu_vars):
+ logger.info('Creating pdu manager from graph information')
+ pdu_devs = conn_graph_facts['device_pdu_info']
+ pdu_links = conn_graph_facts['device_pdu_links']
+ pdu_info = _merge_dev_link(pdu_devs, pdu_links)
+ if dut_hostname not in pdu_info or not pdu_info[dut_hostname]:
+ # No PDU information in graph
+ logger.info('PDU informatin for {} is not found in graph'.format(dut_hostname))
+ return False
+
+ for psu_name, psu_peer in pdu_info[dut_hostname].items():
+ pduman.add_controller(psu_name, psu_peer, pdu_vars)
+
+ return len(pduman.controllers) > 0
+
+
+def _build_pdu_manager_from_inventory(pduman, dut_hostname, pdu_hosts, pdu_vars):
+ logger.info('Creating pdu manager from inventory information')
+ if not pdu_hosts:
+ logger.info('Do not have sufficient PDU information to create PDU manager for host {}'.format(dut_hostname))
+ return False
+
+ for ph, var_list in pdu_hosts.items():
+ controller_ip = var_list.get("ansible_host")
+ if not controller_ip:
+ logger.info('No "ansible_host" is defined in inventory file for "{}"'.format(pdu_hosts))
+ logger.info('Unable to create pdu_controller for {}'.format(dut_hostname))
+ continue
+
+ controller_protocol = var_list.get("protocol")
+ if not controller_protocol:
+ logger.info(
+ 'No protocol is defined in inventory file for "{}". Try to use default "snmp"'.format(pdu_hosts))
+ controller_protocol = 'snmp'
+
+ psu_peer = {
+ 'peerdevice': ph,
+ 'HwSku': 'unknown',
+ 'Protocol': controller_protocol,
+ 'ManagementIp': controller_ip,
+ 'Type': 'Pdu',
+ 'peerport': 'probing',
+ }
+ pduman.add_controller(ph, psu_peer, pdu_vars)
+
+ return len(pduman.controllers) > 0
+
+
+def pdu_manager_factory(dut_hostname, pdu_hosts, conn_graph_facts, pdu_vars):
+ """
+ @summary: Factory function for creating PDU manager instance.
+ @param dut_hostname: DUT host name.
+ @param pdu_hosts: comma separated PDU host names.
+ @param conn_graph_facts: connection graph facts.
+ @param pdu_vars: pdu community strings
+ """
+ logger.info('Creating pdu manager object')
+ pduman = PduManager(dut_hostname)
+ if _build_pdu_manager_from_graph(pduman, dut_hostname, conn_graph_facts, pdu_vars):
+ return pduman
+
+ if _build_pdu_manager_from_inventory(pduman, dut_hostname, pdu_hosts, pdu_vars):
+ return pduman
+
+ return None
diff --git a/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py b/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
index 56d437ba604..14ea5593d72 100644
--- a/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
+++ b/tests/common/plugins/pdu_controller/snmp_pdu_controllers.py
@@ -10,6 +10,8 @@
from pysnmp.proto import rfc1902
from pysnmp.entity.rfc3413.oneliner import cmdgen
+logger = logging.getLogger(__name__)
+
class snmpPduController(PduControllerBase):
"""
PDU Controller class for SNMP conrolled PDUs - 'Sentry Switched CDU' and 'APC Web/SNMP Management Card'
@@ -32,7 +34,7 @@ def get_pdu_controller_type(self):
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
snmp_auth,
cmdgen.UdpTransportTarget((self.controller, 161), timeout=5.0),
- cmdgen.MibVariable(pSYSDESCR,),
+ cmdgen.MibVariable(pSYSDESCR)
)
if errorIndication:
logging.info("Failed to get pdu controller type, exception: " + str(errorIndication))
@@ -59,57 +61,79 @@ def pduCntrlOid(self):
Define Oids based on the PDU Type
"""
# MIB OIDs for 'APC Web/SNMP Management PDU'
- APC_PORT_NAME_BASE_OID = "1.3.6.1.4.1.318.1.1.4.4.2.1.4"
- APC_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.5.1.1.4"
- APC_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.3.1.1.4"
+ APC_PORT_NAME_BASE_OID = "1.3.6.1.4.1.318.1.1.4.4.2.1"
+ APC_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.5.1.1"
+ APC_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.318.1.1.12.3.3.1.1"
# MIB OID for 'Sentry Switched CDU'
- SENTRY_PORT_NAME_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.3.1"
- SENTRY_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.5.1"
- SENTRY_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.11.1"
+ SENTRY_PORT_NAME_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.3"
+ SENTRY_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.5"
+ SENTRY_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.1718.3.2.3.1.11"
# MIB OID for 'Emerson'
- EMERSON_PORT_NAME_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.10.1.1"
- EMERSON_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.100.1.1"
- EMERSON_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.100.1.1"
+ EMERSON_PORT_NAME_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.10.1"
+ EMERSON_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.100.1"
+ EMERSON_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.476.1.42.3.8.50.20.1.100.1"
# MIB OID for 'Sentry Switched PDU'
SENTRY4_PORT_NAME_BASE_OID = "1.3.6.1.4.1.1718.4.1.8.2.1.3"
SENTRY4_PORT_STATUS_BASE_OID = "1.3.6.1.4.1.1718.4.1.8.3.1.1"
SENTRY4_PORT_CONTROL_BASE_OID = "1.3.6.1.4.1.1718.4.1.8.5.1.2"
+ SENTRY4_PORT_POWER_BASE_OID = "1.3.6.1.4.1.1718.4.1.8.3.1.9"
self.STATUS_ON = "1"
self.STATUS_OFF = "0"
self.CONTROL_ON = "1"
self.CONTROL_OFF = "2"
+ self.has_lanes = True
+ self.max_lanes = 5
+ self.PORT_POWER_BASE_OID = None
if self.pduType == "APC":
- self.pPORT_NAME_BASE_OID = '.'+APC_PORT_NAME_BASE_OID
- self.pPORT_STATUS_BASE_OID = '.'+APC_PORT_STATUS_BASE_OID
- self.pPORT_CONTROL_BASE_OID = '.'+APC_PORT_CONTROL_BASE_OID
self.PORT_NAME_BASE_OID = APC_PORT_NAME_BASE_OID
self.PORT_STATUS_BASE_OID = APC_PORT_STATUS_BASE_OID
self.PORT_CONTROL_BASE_OID = APC_PORT_CONTROL_BASE_OID
elif self.pduType == "SENTRY":
- self.pPORT_NAME_BASE_OID = '.'+SENTRY_PORT_NAME_BASE_OID
- self.pPORT_STATUS_BASE_OID = '.'+SENTRY_PORT_STATUS_BASE_OID
- self.pPORT_CONTROL_BASE_OID = '.'+SENTRY_PORT_CONTROL_BASE_OID
self.PORT_NAME_BASE_OID = SENTRY_PORT_NAME_BASE_OID
self.PORT_STATUS_BASE_OID = SENTRY_PORT_STATUS_BASE_OID
self.PORT_CONTROL_BASE_OID = SENTRY_PORT_CONTROL_BASE_OID
elif self.pduType == "Emerson":
- self.pPORT_NAME_BASE_OID = '.'+EMERSON_PORT_NAME_BASE_OID
- self.pPORT_STATUS_BASE_OID = '.'+EMERSON_PORT_STATUS_BASE_OID
- self.pPORT_CONTROL_BASE_OID = '.'+EMERSON_PORT_CONTROL_BASE_OID
self.PORT_NAME_BASE_OID = EMERSON_PORT_NAME_BASE_OID
self.PORT_STATUS_BASE_OID = EMERSON_PORT_STATUS_BASE_OID
self.PORT_CONTROL_BASE_OID = EMERSON_PORT_CONTROL_BASE_OID
elif self.pduType == "SENTRY4":
- self.pPORT_NAME_BASE_OID = '.'+SENTRY4_PORT_NAME_BASE_OID
- self.pPORT_STATUS_BASE_OID = '.'+SENTRY4_PORT_STATUS_BASE_OID
- self.pPORT_CONTROL_BASE_OID = '.'+SENTRY4_PORT_CONTROL_BASE_OID
self.PORT_NAME_BASE_OID = SENTRY4_PORT_NAME_BASE_OID
self.PORT_STATUS_BASE_OID = SENTRY4_PORT_STATUS_BASE_OID
self.PORT_CONTROL_BASE_OID = SENTRY4_PORT_CONTROL_BASE_OID
+ self.PORT_POWER_BASE_OID = SENTRY4_PORT_POWER_BASE_OID
+ self.has_lanes = False
+ self.max_lanes = 1
else:
pass
+ def _build_outlet_maps(self, port_oid, label):
+ self.port_oid_dict[port_oid] = { 'label' : label }
+ self.port_label_dict[label] = { 'port_oid' : port_oid }
+
+
+ def _probe_lane(self, lane_id, cmdGen, snmp_auth):
+ pdu_port_base = self.PORT_NAME_BASE_OID
+ query_oid = '.' + pdu_port_base
+ if self.has_lanes:
+ query_oid = query_oid + str(lane_id)
+
+ errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((self.controller, 161)),
+ cmdgen.MibVariable(query_oid)
+ )
+ if errorIndication:
+ logging.debug("Failed to get ports controlling PSUs of DUT, exception: " + str(errorIndication))
+ else:
+ for varBinds in varTable:
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ port_oid = current_oid.replace(pdu_port_base, '')
+ label = val.prettyPrint().lower()
+ self._build_outlet_maps(port_oid, label)
+
+
def _get_pdu_ports(self):
"""
@summary: Helper method for getting PDU ports connected to PSUs of DUT
@@ -118,68 +142,31 @@ def _get_pdu_ports(self):
This method depends on this configuration to find out the PDU ports connected to PSUs of specific DUT.
"""
if not self.pduType:
- logging.info('PDU type is unknown')
+ logging.info('PDU type is unknown: pdu_ip {}'.format(self.controller))
return
- max_lane = 5
- host_matched = False
cmdGen = cmdgen.CommandGenerator()
snmp_auth = cmdgen.CommunityData(self.snmp_rocommunity)
- for lane_id in range(1, max_lane + 1):
- pdu_port_base = self.PORT_NAME_BASE_OID[0: -1] + str(lane_id)
-
- errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
- snmp_auth,
- cmdgen.UdpTransportTarget((self.controller, 161)),
- cmdgen.MibVariable("." + pdu_port_base,),
- )
- if errorIndication:
- logging.debug("Failed to get ports controlling PSUs of DUT, exception: " + str(errorIndication))
- else:
- for varBinds in varTable:
- for oid, val in varBinds:
- current_oid = oid.prettyPrint()
- current_val = val.prettyPrint()
- if self.hostname.lower() in current_val.lower():
- host_matched = True
- # Remove the preceding PORT_NAME_BASE_OID, remaining string is the PDU port ID
- self.pdu_ports.append(current_oid.replace(pdu_port_base, ''))
- if host_matched:
- self.map_host_to_lane(lane_id)
- break
- else:
- logging.error("{} device is not attached to any of PDU port".format(self.hostname.lower()))
+ for lane_id in range(1, self.max_lanes + 1):
+ self._probe_lane(lane_id, cmdGen, snmp_auth)
- def map_host_to_lane(self, lane_id):
- """
- Dynamically update Oids based on the PDU lane ID
- """
- if self.pduType == "SENTRY4":
- # No need to update lane for SENTRY4
- return
- self.pPORT_NAME_BASE_OID = self.pPORT_NAME_BASE_OID[0: -1] + str(lane_id)
- self.pPORT_STATUS_BASE_OID = self.pPORT_STATUS_BASE_OID[0: -1] + str(lane_id)
- self.pPORT_CONTROL_BASE_OID = self.pPORT_CONTROL_BASE_OID[0: -1] + str(lane_id)
- self.PORT_NAME_BASE_OID = self.PORT_NAME_BASE_OID[0: -1] + str(lane_id)
- self.PORT_STATUS_BASE_OID = self.PORT_STATUS_BASE_OID[0: -1] + str(lane_id)
- self.PORT_CONTROL_BASE_OID = self.PORT_CONTROL_BASE_OID[0: -1] + str(lane_id)
-
- def __init__(self, hostname, controller, pdu):
+ def __init__(self, controller, pdu):
logging.info("Initializing " + self.__class__.__name__)
PduControllerBase.__init__(self)
- self.hostname = hostname
self.controller = controller
self.snmp_rocommunity = pdu['snmp_rocommunity']
self.snmp_rwcommunity = pdu['snmp_rwcommunity']
- self.pdu_ports = []
self.pduType = None
+ self.port_oid_dict = {}
+ self.port_label_dict = {}
self.get_pdu_controller_type()
self.pduCntrlOid()
self._get_pdu_ports()
logging.info("Initialized " + self.__class__.__name__)
+
def turn_on_outlet(self, outlet):
"""
@summary: Use SNMP to turn on power to PDU of DUT specified by outlet
@@ -190,23 +177,19 @@ def turn_on_outlet(self, outlet):
Because of this, currently we just find out which PDU ports are connected to PSUs of which DUT. We cannot
find out the exact mapping between PDU ports and PSUs of DUT.
- To overcome this limitation, the trick is to convert the specified outlet to integer, then calculate the mode
- upon the number of PSUs on DUT. The calculated mode is used as an index to get PDU ports ID stored in
- self.pdu_ports. But still, we cannot gurante that outlet 0 is first PDU of DUT, and so on.
-
@param outlet: ID of the PDU on SONiC DUT
@return: Return true if successfully execute the command for turning on power. Otherwise return False.
"""
if not self.pduType:
- logging.error('Unable to turn on: PDU type is unknown')
+ logging.error('Unable to turn on: PDU type is unknown: pdu_ip {}'.format(self.controller))
return False
- port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(outlet)]
+ port_oid = '.' + self.PORT_CONTROL_BASE_OID + outlet
errorIndication, errorStatus, _, _ = \
cmdgen.CommandGenerator().setCmd(
cmdgen.CommunityData(self.snmp_rwcommunity),
cmdgen.UdpTransportTarget((self.controller, 161)),
- (port_oid, rfc1902.Integer(self.CONTROL_ON)),
+ (port_oid, rfc1902.Integer(self.CONTROL_ON))
)
if errorIndication or errorStatus != 0:
logging.debug("Failed to turn on outlet %s, exception: %s" % (str(outlet), str(errorStatus)))
@@ -223,30 +206,71 @@ def turn_off_outlet(self, outlet):
Because of this, currently we just find out which PDU outlets are connected to PSUs of which DUT. We cannot
find out the exact mapping between PDU outlets and PSUs of DUT.
- To overcome this limitation, the trick is to convert the specified outlet to integer, then calculate the mode
- upon the number of PSUs on DUT. The calculated mode is used as an index to get PDU ports ID stored in
- self.pdu_ports. But still, we cannot guarantee that outlet 0 is first PSU of DUT, and so on.
-
@param outlet: ID of the outlet on PDU
@return: Return true if successfully execute the command for turning off power. Otherwise return False.
"""
if not self.pduType:
- logging.error('Unable to turn off: PDU type is unknown')
+ logging.error('Unable to turn off: PDU type is unknown: pdu_ip {}'.format(self.controller))
return False
- port_oid = self.pPORT_CONTROL_BASE_OID + self.pdu_ports[rfc1902.Integer(outlet)]
+ port_oid = '.' + self.PORT_CONTROL_BASE_OID + outlet
errorIndication, errorStatus, _, _ = \
cmdgen.CommandGenerator().setCmd(
cmdgen.CommunityData(self.snmp_rwcommunity),
cmdgen.UdpTransportTarget((self.controller, 161)),
- (port_oid, rfc1902.Integer(self.CONTROL_OFF)),
+ (port_oid, rfc1902.Integer(self.CONTROL_OFF))
)
if errorIndication or errorStatus != 0:
logging.debug("Failed to turn on outlet %s, exception: %s" % (str(outlet), str(errorStatus)))
return False
return True
- def get_outlet_status(self, outlet=None):
+
+ def _get_one_outlet_power(self, cmdGen, snmp_auth, port_id, status):
+ if not self.PORT_POWER_BASE_OID:
+ return
+
+ query_id = '.' + self.PORT_POWER_BASE_OID + port_id
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((self.controller, 161)),
+ cmdgen.MibVariable(query_id)
+ )
+ if errorIndication:
+ logging.debug("Failed to get outlet power level of DUT outlet, exception: " + str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ port_oid = current_oid.replace(self.PORT_POWER_BASE_OID, '')
+ if port_oid == port_id:
+ status['output_watts'] = current_val
+ return
+
+
+ def _get_one_outlet_status(self, cmdGen, snmp_auth, port_id):
+ query_id = '.' + self.PORT_STATUS_BASE_OID + port_id
+ errorIndication, errorStatus, errorIndex, varBinds = cmdGen.getCmd(
+ snmp_auth,
+ cmdgen.UdpTransportTarget((self.controller, 161)),
+ cmdgen.MibVariable(query_id)
+ )
+ if errorIndication:
+ logging.debug("Failed to outlet status of PDU, exception: " + str(errorIndication))
+
+ for oid, val in varBinds:
+ current_oid = oid.prettyPrint()
+ current_val = val.prettyPrint()
+ port_oid = current_oid.replace(self.PORT_STATUS_BASE_OID, '')
+ if port_oid == port_id:
+ status = {"outlet_id": port_oid, "outlet_on": True if current_val == self.STATUS_ON else False}
+ self._get_one_outlet_power(cmdGen, snmp_auth, port_id, status)
+ return status
+
+ return None
+
+
+ def get_outlet_status(self, outlet=None, hostname=None):
"""
@summary: Use SNMP to get status of PDU ports supplying power to PSUs of DUT
@@ -256,42 +280,38 @@ def get_outlet_status(self, outlet=None):
Because of this, currently we just find out which PDU ports are connected to PSUs of which DUT. We cannot
find out the exact mapping between PDU outlets and PSUs of DUT.
- To overcome this limitation, the trick is to convert the specified outlet to integer, then calculate the mode
- upon the number of PSUs on DUT. The calculated mode is used as an index to get PDU outlet ID stored in
- self.pdu_ports. But still, we cannot guarantee that outlet 0 is first PSU of DUT, and so on.
-
@param outlet: Optional. If specified, only return status of PDU outlet connected to specified PSU of DUT. If
omitted, return status of all PDU outlets connected to PSUs of DUT.
@return: Return status of PDU outlets connected to PSUs of DUT in a list of dictionary. Example result:
- [{"outlet_id": 0, "outlet_on": True}, {"outlet_id": 1, "outlet_on": True}]
+ [{"outlet_id": "0.0.1", "outlet_on": True}, {"outlet_id": "0.0.2", "outlet_on": True}]
The outlet in returned result is integer starts from 0.
"""
results = []
if not self.pduType:
- logging.error('Unable to retrieve status: PDU type is unknown')
+ logging.error('Unable to retrieve status: PDU type is unknown: pdu_ip {}'.format(self.controller))
return results
+ if not outlet and not hostname:
+ # Return status of all outlets
+ ports = self.port_oid_dict.keys()
+ elif outlet:
+ ports = [ oid for oid in self.port_oid_dict.keys() if oid.endswith(outlet) ]
+ if not ports:
+ logging.error("Outlet ID {} doesn't belong to PDU {}".format(outlet, self.controller))
+ elif hostname:
+ hn = hostname.lower()
+ ports = [ self.port_label_dict[label]['port_oid'] for label in self.port_label_dict.keys() if hn in label ]
+ if not ports:
+ logging.error("{} device is not attached to any outlet of PDU {}".format(hn, self.controller))
+
cmdGen = cmdgen.CommandGenerator()
snmp_auth = cmdgen.CommunityData(self.snmp_rocommunity)
- errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd(
- snmp_auth,
- cmdgen.UdpTransportTarget((self.controller, 161)),
- cmdgen.MibVariable(self.pPORT_STATUS_BASE_OID,),
- )
- if errorIndication:
- logging.debug("Failed to get ports controlling PSUs of DUT, exception: " + str(errorIndication))
- for varBinds in varTable:
- for oid, val in varBinds:
- current_oid = oid.prettyPrint()
- current_val = val.prettyPrint()
- for idx, port in enumerate(self.pdu_ports):
- port_oid = self.PORT_STATUS_BASE_OID + port
- if current_oid == port_oid:
- status = {"outlet_id": idx, "outlet_on": True if current_val == self.STATUS_ON else False}
- results.append(status)
- if outlet is not None:
- idx = int(outlet) % len(self.pdu_ports)
- results = results[idx:idx+1]
+
+ for port in ports:
+ status = self._get_one_outlet_status(cmdGen, snmp_auth, port)
+ if status:
+ results.append(status)
+
logging.info("Got outlet status: %s" % str(results))
return results
@@ -299,9 +319,9 @@ def close(self):
pass
-def get_pdu_controller(controller_ip, dut_hostname, pdu):
+def get_pdu_controller(controller_ip, pdu):
"""
@summary: Factory function to create the actual PDU controller object.
@return: The actual PDU controller object. Returns None if something went wrong.
"""
- return snmpPduController(dut_hostname, controller_ip, pdu)
+ return snmpPduController(controller_ip, pdu)
diff --git a/tests/common/plugins/sanity_check/README.md b/tests/common/plugins/sanity_check/README.md
index dd5b23326e2..11049b99fbb 100644
--- a/tests/common/plugins/sanity_check/README.md
+++ b/tests/common/plugins/sanity_check/README.md
@@ -20,7 +20,7 @@ pytest_plugins = [
sonic-mgmt/tests/common/plugins/sanity_check:
```
@pytest.fixture(scope="module", autouse=True)
-def sanity_check(localhost, duthost, request, fanouthosts):
+def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
...
```
@@ -43,23 +43,36 @@ We can specify multiple keyword arguments to override the default sanity check b
* allow_recover: Boolean, specify whether recovery should be performed in case of pre-test sanity check failed. Default: False.
* recover_method: String, specify the method to be used for recovery. Default: "config_reload". Supported values: refer to sonic-mgmt/tests/common/plugins/sanity_check/constants.py::RECOVER_METHODS.
* post_check: Boolean, specify whether post-test sanity check should be performed. Default: False.
-* check_items: Please refer to below section for more detailed explanation.
+* check_items: Please refer to below section for details.
+* post_check_items: Please refer to below section for details.
### Fine tune `check_items`
-We can use keyword argument `check_items` to fine tune the items to be checked in sanity check. At the time of writing, we implemented 2 check items.
+We can use keyword argument `check_items` to fine tune the items to be checked in sanity check. All the function starts with 'check_' in `tests/common/plugins/sanity_check/checks.py` is a check item. Item name is the function name without the `check_` prefix. Currently supported check items:
* services: Check the status of all critical services.
* interfaces: Check the status of network interfaces.
-Please refer to sonic-mgmt/tests/common/plugins/sanity_check/constants::SUPPORTED_CHECK_ITEMS for the latest supported check items.
+* bgp: Check BGP status.
+* dbmemory: Check database memory.
+* monit: Check monit status.
+* processes: Check status of critical processes.
+* mux_simulator: Check status of mux simulator.
+*
+Please refer to `sonic-mgmt/tests/common/plugins/sanity_check/checks.py` for the latest supported check items.
-Value for `check_items` should be a tuple or list of strings. Each item in the tuple or list should be a string. The string can be name of the supported check items with optional prefix `+` or `-` or `_`. Unsupported check items will be ignored.
+Value for `check_items` should be a tuple or list of strings. Each item in the tuple or list must be a string. The string can be name of the supported check items with optional prefix `+` or `-` or `_`. Unsupported check items will be ignored.
If a supported check item is prefixed with `-` or `_`, then this item will not be checked in sanity. For items with prefix `+` or without prefixes, the item should be included in the list of items to be checked in sanity.
-With this design, we can extend the sanity check items in the future. By default, only a very basic set of sanity check is performed. For some test scripts that do not need some default sanity check items or need some extra sanity check items, we can use this syntax to tailor the check items that fit best for the current test script.
+With this design, we can extend the sanity check items in the future. By default, only a very basic set of sanity check is performed. For some test scripts that do not need some default sanity check items or need some extra sanity check items, we can use this syntax to fine tune the check items that fit best for the current test script.
User can change check item list by passing parameter from command line --check_items="add remove string". Example: --check_items="_services,+bgp" means do not check services, but add bgp to the check list. This parameter is not an absolute list, it is addition or subtraction from the existing list. On command line "-" has special meaning. So, we need to prefix "_" to skip a check item.
+### Fine tune `post_check_items`
+
+By default, the list of post check items is the same as pre test. Sometimes, we may need different post test checks. In this case, we can use the `post_check_items` marker argument or `--post_check_items` command line option to fine tune post check items just the same way as pre test.
+
+Please be noted that the post check items list is always based on the final pre test check items (updated by test marker args and command line options).
+
## Log collecting
If sanity check is to be performed, the script will also run some commands on the DUT to collect some basic information for debugging. Please refer to sonic-mgmt/tests/common/plugins/sanity_check/constants::PRINT_LOGS for the list of logs that will be collected.
@@ -94,13 +107,36 @@ If we reboot the DUT or perform a config reload on it, the networking service is
## Example
+file: test_feature1.py
```
import pytest
-pytestmark = [pytest.mark.sanity_check(allow_recover=True, recover_method="reboot", post_check=True, check_items=("-interfaces",))]
+pytestmark = [pytest.mark.sanity_check(
+ allow_recover=True,
+ recover_method="reboot",
+ post_check=True,
+ check_items=("-interfaces",))]
```
-
In the above example, both pre-test and post-test sanity check will be performed. Status of the interfaces will not be checked. In case of failure in the first round of sanity check, the sanity check plugin will try to recover the DUT by rebooting it.
+
+file: test_feature2.py
+```
+import pytest
+pytestmark = [pytest.mark.sanity_check(
+ allow_recover=False,
+ recover_method="adaptive",
+ post_check=True,
+ check_items=["-interfaces",],
+ post_check_items=["-services",])]
+```
+In the above example, both pre-test and post-test sanity check will be performed. Status of the interfaces will not be checked in pre and post test. Status of services will not be checked in post test. In case of failure in the first round of sanity check, the sanity check plugin will try to recover the DUT using method 'adaptive'.
+
+The check options can be overridden by command line options:
+* --skip_sanity
+* --allow_recover
+* --check_items
+* --post_check_items
+
References:
* [Working with custom markers](https://docs.pytest.org/en/latest/example/markers.html)
* [Pytest request](https://docs.pytest.org/en/latest/reference.html#request)
diff --git a/tests/common/plugins/sanity_check/__init__.py b/tests/common/plugins/sanity_check/__init__.py
index b0fc853afa4..c5a1635e0bb 100644
--- a/tests/common/plugins/sanity_check/__init__.py
+++ b/tests/common/plugins/sanity_check/__init__.py
@@ -12,14 +12,37 @@
from tests.common.plugins.sanity_check import checks
from tests.common.plugins.sanity_check.checks import *
from tests.common.plugins.sanity_check.recover import recover
+from tests.common.plugins.sanity_check.constants import STAGE_PRE_TEST, STAGE_POST_TEST
from tests.common.helpers.assertions import pytest_assert as pt_assert
-from tests.common.plugins.sanity_check.checks import check_monit
-
logger = logging.getLogger(__name__)
-SUPPORTED_CHECKS = [member[0].replace('check_', '') for member in getmembers(checks, isfunction)
- if member[0].startswith('check_')]
+
+def is_check_item(member):
+ '''
+ Function to filter for valid check items
+
+ Used in conjunction with inspect.getmembers to make sure that only valid check functions/fixtures executed
+
+ Valid check items must meet the following criteria:
+ - Is a function
+ - Is defined directly in sanity_checks/checks.py, NOT imported from another file
+ - Begins with the string 'check_'
+
+ Args:
+ member (object): The object to checked
+ Returns:
+ (bool) True if 'member' is a valid check function, False otherwise
+ '''
+ if isfunction(member):
+ in_check_file = member.__module__ == 'tests.common.plugins.sanity_check.checks'
+ starts_with_check = member.__name__.startswith('check_')
+ return in_check_file and starts_with_check
+ else:
+ return False
+
+
+SUPPORTED_CHECKS = [member[0].replace('check_', '') for member in getmembers(checks, is_check_item)]
def _item2fixture(item):
@@ -37,7 +60,7 @@ def _update_check_items(old_items, new_items, supported_items):
for new_item in new_items:
if not new_item:
continue
- if new_item[0] in ["_", "-"]: # Remove default check item
+ if new_item[0] in ["_", "-"]: # Skip a check item
new_item = new_item[1:]
if new_item in updated_items:
logger.info("Skip checking '%s'" % new_item)
@@ -56,18 +79,36 @@ def _update_check_items(old_items, new_items, supported_items):
def print_logs(duthosts):
for dut in duthosts:
- logger.info("Run commands to print logs, logs to be collected on {}:\n{}"\
- .format(dut.hostname, json.dumps(constants.PRINT_LOGS, indent=4)))
- for cmd in constants.PRINT_LOGS.values():
- res = dut.shell(cmd, module_ignore_errors=True)
- logger.info("cmd='%s', output:\n%s" % (cmd, json.dumps(res["stdout_lines"], indent=4)))
+ logger.info("Run commands to print logs")
+
+ cmds = constants.PRINT_LOGS.values()
+ results = dut.shell_cmds(cmds=cmds, module_ignore_errors=True, verbose=False)['results']
+ outputs = []
+ for res in results:
+ res.pop('stdout')
+ res.pop('stderr')
+ outputs.append(res)
+ logger.info(json.dumps(outputs, indent=4))
+
+
+def filter_check_items(tbinfo, check_items):
+ filtered_check_items = copy.deepcopy(check_items)
+
+ # ignore BGP check for particular topology type
+ if tbinfo['topo']['type'] == 'ptf' and 'bgp' in filtered_check_items:
+ filtered_check_items.remove('bgp')
+ if 'dualtor' not in tbinfo['topo']['name'] and 'mux_simulator' in filtered_check_items:
+ filtered_check_items.remove('mux_simulator')
-def do_checks(request, check_items):
+ return filtered_check_items
+
+
+def do_checks(request, check_items, *args, **kwargs):
check_results = []
for item in check_items:
check_fixture = request.getfixturevalue(_item2fixture(item))
- results = check_fixture()
+ results = check_fixture(*args, **kwargs)
if results and isinstance(results, list):
check_results.extend(results)
elif results:
@@ -77,12 +118,12 @@ def do_checks(request, check_items):
@pytest.fixture(scope="module", autouse=True)
def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
- logger.info("Prepare pre-test sanity check")
+ logger.info("Prepare sanity check")
skip_sanity = False
allow_recover = False
recover_method = "adaptive"
- check_items = set(copy.deepcopy(SUPPORTED_CHECKS)) # Default check items
+ pre_check_items = set(copy.deepcopy(SUPPORTED_CHECKS)) # Default check items
post_check = False
customized_sanity_check = None
@@ -103,9 +144,11 @@ def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
logger.info("Fall back to use default recover method 'config_reload'")
recover_method = "config_reload"
- check_items = _update_check_items(check_items,
- customized_sanity_check.kwargs.get("check_items", []),
- SUPPORTED_CHECKS)
+ pre_check_items = _update_check_items(
+ pre_check_items,
+ customized_sanity_check.kwargs.get("check_items", []),
+ SUPPORTED_CHECKS)
+
post_check = customized_sanity_check.kwargs.get("post_check", False)
if request.config.option.skip_sanity:
@@ -118,57 +161,84 @@ def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
if request.config.option.allow_recover:
allow_recover = True
- cli_items = request.config.getoption("--check_items")
- if cli_items:
- cli_items_list=str(cli_items).split(',')
- check_items = _update_check_items(check_items, cli_items_list, SUPPORTED_CHECKS)
+ if request.config.option.post_check:
+ post_check = True
- # ignore BGP check for particular topology type
- if tbinfo['topo']['type'] == 'ptf' and 'bgp' in check_items:
- check_items.remove('bgp')
+ cli_check_items = request.config.getoption("--check_items")
+ cli_post_check_items = request.config.getoption("--post_check_items")
- logger.info("Sanity check settings: skip_sanity=%s, check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s" % \
- (skip_sanity, check_items, allow_recover, recover_method, post_check))
+ if cli_check_items:
+ logger.info('Fine tune pre-test check items based on CLI option --check_items')
+ cli_items_list=str(cli_check_items).split(',')
+ pre_check_items = _update_check_items(pre_check_items, cli_items_list, SUPPORTED_CHECKS)
- if not check_items:
- logger.info("No sanity check item is specified, no pre-test sanity check")
- yield
- logger.info("No sanity check item is specified, no post-test sanity check")
- return
+ pre_check_items = filter_check_items(tbinfo, pre_check_items) # Filter out un-supported checks.
- # Dynamically attach selected check fixtures to node
- for item in check_items:
+ if post_check:
+ # Prepare post test check items based on the collected pre test check items.
+ post_check_items = copy.copy(pre_check_items)
+ if customized_sanity_check:
+ post_check_items = _update_check_items(
+ post_check_items,
+ customized_sanity_check.kwargs.get("post_check_items", []),
+ SUPPORTED_CHECKS)
+
+ if cli_post_check_items:
+ logger.info('Fine tune post-test check items based on CLI option --post_check_items')
+ cli_post_items_list = str(cli_post_check_items).split(',')
+ post_check_items = _update_check_items(post_check_items, cli_post_items_list, SUPPORTED_CHECKS)
+
+ post_check_items = filter_check_items(tbinfo, post_check_items) # Filter out un-supported checks.
+ else:
+ post_check_items = set()
+
+ logger.info("Sanity check settings: skip_sanity=%s, pre_check_items=%s, allow_recover=%s, recover_method=%s, post_check=%s, post_check_items=%s" % \
+ (skip_sanity, pre_check_items, allow_recover, recover_method, post_check, post_check_items))
+
+ for item in pre_check_items.union(post_check_items):
request.fixturenames.append(_item2fixture(item))
- print_logs(duthosts)
+ # Workaround for pytest requirement.
+ # Each possibly used check fixture must be executed in setup phase. Otherwise there could be teardown error.
+ request.getfixturevalue(_item2fixture(item))
- logger.info("Start pre-test sanity checks")
- check_results = do_checks(request, check_items)
- logger.debug("Pre-test sanity check results:\n%s" % json.dumps(check_results, indent=4))
+ if pre_check_items:
+ logger.info("Start pre-test sanity checks")
- failed_results = [result for result in check_results if result['failed']]
- if failed_results:
- if not allow_recover:
- pt_assert(False, "!!!!!!!!!!!!!!!!Pre-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
- .format(json.dumps(failed_results, indent=4)))
- else:
- dut_failed_results = defaultdict(list)
- for failed_result in failed_results:
- if 'host' in failed_result:
- dut_failed_results[failed_result['host']].append(failed_result)
- for dut_name, dut_results in dut_failed_results.items():
- recover(duthosts[dut_name], localhost, fanouthosts, dut_results, recover_method)
+ # Dynamically attach selected check fixtures to node
+ for item in set(pre_check_items):
+ request.fixturenames.append(_item2fixture(item))
- logger.info("Run sanity check again after recovery")
- new_check_results = do_checks(request, check_items)
- logger.debug("Pre-test sanity check after recovery results:\n%s" % json.dumps(new_check_results, indent=4))
+ print_logs(duthosts)
- new_failed_results = [result for result in new_check_results if result['failed']]
- if new_failed_results:
- pt_assert(False, "!!!!!!!!!!!!!!!! Pre-test sanity check after recovery failed: !!!!!!!!!!!!!!!!\n{}"\
- .format(json.dumps(new_failed_results, indent=4)))
+ check_results = do_checks(request, pre_check_items, stage=STAGE_PRE_TEST)
+ logger.debug("Pre-test sanity check results:\n%s" % json.dumps(check_results, indent=4))
- logger.info("Done pre-test sanity check")
+ failed_results = [result for result in check_results if result['failed']]
+ if failed_results:
+ if not allow_recover:
+ pt_assert(False, "!!!!!!!!!!!!!!!!Pre-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
+ .format(json.dumps(failed_results, indent=4)))
+ else:
+ dut_failed_results = defaultdict(list)
+ for failed_result in failed_results:
+ if 'host' in failed_result:
+ dut_failed_results[failed_result['host']].append(failed_result)
+ for dut_name, dut_results in dut_failed_results.items():
+ recover(duthosts[dut_name], localhost, fanouthosts, dut_results, recover_method)
+
+ logger.info("Run sanity check again after recovery")
+ new_check_results = do_checks(request, pre_check_items, stage=STAGE_PRE_TEST, after_recovery=True)
+ logger.debug("Pre-test sanity check after recovery results:\n%s" % json.dumps(new_check_results, indent=4))
+
+ new_failed_results = [result for result in new_check_results if result['failed']]
+ if new_failed_results:
+ pt_assert(False, "!!!!!!!!!!!!!!!! Pre-test sanity check after recovery failed: !!!!!!!!!!!!!!!!\n{}"\
+ .format(json.dumps(new_failed_results, indent=4)))
+
+ logger.info("Done pre-test sanity check")
+ else:
+ logger.info('No pre-test sanity check item, skip pre-test sanity check.')
yield
@@ -176,14 +246,16 @@ def sanity_check(localhost, duthosts, request, fanouthosts, tbinfo):
logger.info("No post-test check is required. Done post-test sanity check")
return
- logger.info("Start post-test sanity check")
- post_check_results = do_checks(request, check_items)
- logger.debug("Post-test sanity check results:\n%s" % json.dumps(post_check_results, indent=4))
+ if post_check_items:
+ logger.info("Start post-test sanity check")
+ post_check_results = do_checks(request, post_check_items, stage=STAGE_POST_TEST)
+ logger.debug("Post-test sanity check results:\n%s" % json.dumps(post_check_results, indent=4))
- post_failed_results = [result for result in post_check_results if result['failed']]
- if post_failed_results:
- pt_assert(False, "!!!!!!!!!!!!!!!! Post-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
- .format(json.dumps(post_failed_results, indent=4)))
+ post_failed_results = [result for result in post_check_results if result['failed']]
+ if post_failed_results:
+ pt_assert(False, "!!!!!!!!!!!!!!!! Post-test sanity check failed: !!!!!!!!!!!!!!!!\n{}"\
+ .format(json.dumps(post_failed_results, indent=4)))
- logger.info("Done post-test sanity check")
- return
+ logger.info("Done post-test sanity check")
+ else:
+ logger.info('No post-test sanity check item, skip post-test sanity check.')
diff --git a/tests/common/plugins/sanity_check/checks.py b/tests/common/plugins/sanity_check/checks.py
index 81ff0df9466..468ca852b36 100644
--- a/tests/common/plugins/sanity_check/checks.py
+++ b/tests/common/plugins/sanity_check/checks.py
@@ -1,11 +1,15 @@
import re
import json
import logging
-import time
-
+import ptf.testutils as testutils
import pytest
+import time
+from ipaddress import ip_network, IPv4Network
+from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait, wait_until
+from tests.common.dualtor.mux_simulator_control import *
+from tests.common.dualtor.dual_tor_utils import *
logger = logging.getLogger(__name__)
SYSTEM_STABILIZE_MAX_TIME = 300
@@ -18,12 +22,13 @@
'check_bgp',
'check_dbmemory',
'check_monit',
- 'check_processes']
+ 'check_processes',
+ 'check_mux_simulator']
@pytest.fixture(scope="module")
def check_services(duthosts):
- def _check():
+ def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking services status on %s..." % dut.hostname)
@@ -104,9 +109,9 @@ def _find_down_ports(dut, phy_interfaces, ip_interfaces):
@pytest.fixture(scope="module")
def check_interfaces(duthosts):
- def _check():
+ def _check(*args, **kwargs):
check_results = []
- for dut in duthosts:
+ for dut in duthosts.frontend_nodes:
logger.info("Checking interfaces status on %s..." % dut.hostname)
networking_uptime = dut.get_networking_uptime().seconds
@@ -120,7 +125,7 @@ def _check():
for asic in dut.asics:
ip_interfaces = []
cfg_facts = asic.config_facts(host=dut.hostname,
- source="persistent")['ansible_facts']
+ source="persistent", verbose=False)['ansible_facts']
phy_interfaces = [k for k, v in cfg_facts["PORT"].items() if "admin_status" in v and v["admin_status"] == "up"]
if "PORTCHANNEL_INTERFACE" in cfg_facts:
ip_interfaces = cfg_facts["PORTCHANNEL_INTERFACE"].keys()
@@ -159,9 +164,9 @@ def _check():
@pytest.fixture(scope="module")
def check_bgp(duthosts):
- def _check():
+ def _check(*args, **kwargs):
check_results = []
- for dut in duthosts:
+ for dut in duthosts.frontend_nodes:
def _check_bgp_status_helper():
asic_check_results = []
bgp_facts = dut.bgp_facts(asic_index='all')
@@ -237,7 +242,7 @@ def _is_db_omem_over_threshold(command_output):
@pytest.fixture(scope="module")
def check_dbmemory(duthosts):
- def _check():
+ def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking database memory on %s..." % dut.hostname)
@@ -278,6 +283,170 @@ def _check_monit_services_status(check_result, monit_services_status):
return check_result
+def get_arp_pkt_info(dut):
+ intf_mac = dut.facts['router_mac']
+ mgmt_ipv4 = None
+
+ mgmt_intf_facts = dut.get_running_config_facts()['MGMT_INTERFACE']
+
+ for mgmt_intf in mgmt_intf_facts:
+ for mgmt_ip in mgmt_intf_facts[mgmt_intf]:
+ if type(ip_network(mgmt_ip, strict=False)) is IPv4Network:
+ mgmt_ipv4 = mgmt_ip.split('/')[0]
+ return intf_mac, mgmt_ipv4
+
+ return intf_mac, mgmt_ipv4
+
+
+@pytest.fixture(scope='module')
+def check_mux_simulator(ptf_server_intf, tor_mux_intf, ptfadapter, upper_tor_host, lower_tor_host, \
+ recover_all_directions, toggle_simulator_port_to_upper_tor, toggle_simulator_port_to_lower_tor, check_simulator_read_side):
+
+ def _check(*args, **kwargs):
+ """
+ @summary: Checks if the OVS bridge mux simulator is functioning correctly
+ @return: A dictionary containing the testing result of the PTF interface tested:
+ {
+ 'failed': ,
+ 'failed_reason': ,
+ 'intf': ' mux simulator'
+ }
+ """
+ results = {
+ 'failed': False,
+ 'failed_reason': '',
+ 'check_item': '{} mux simulator'.format(ptf_server_intf)
+ }
+
+ logger.info("Checking mux simulator status for PTF interface {}".format(ptf_server_intf))
+ ptf_port_index = int(ptf_server_intf.replace('eth', ''))
+ recover_all_directions(tor_mux_intf)
+
+ upper_tor_intf_mac, upper_tor_mgmt_ip = get_arp_pkt_info(upper_tor_host)
+ lower_tor_intf_mac, lower_tor_mgmt_ip = get_arp_pkt_info(lower_tor_host)
+
+ upper_tor_ping_tgt_ip = '10.10.10.1'
+ lower_tor_ping_tgt_ip = '10.10.10.2'
+ ptf_arp_tgt_ip = '10.10.10.3'
+ ping_cmd = 'ping -I {} {} -c 1 -W 1; true'
+
+ upper_tor_exp_pkt = testutils.simple_arp_packet(eth_dst='ff:ff:ff:ff:ff:ff',
+ eth_src=upper_tor_intf_mac,
+ ip_snd=upper_tor_mgmt_ip,
+ ip_tgt=upper_tor_ping_tgt_ip,
+ hw_snd=upper_tor_intf_mac)
+ lower_tor_exp_pkt = testutils.simple_arp_packet(eth_dst='ff:ff:ff:ff:ff:ff',
+ eth_src=lower_tor_intf_mac,
+ ip_snd=lower_tor_mgmt_ip,
+ ip_tgt=lower_tor_ping_tgt_ip,
+ hw_snd=lower_tor_intf_mac)
+
+ ptf_arp_pkt = testutils.simple_arp_packet(ip_tgt=ptf_arp_tgt_ip,
+ ip_snd=ptf_arp_tgt_ip,
+ arp_op=2)
+
+ # Clear ARP tables to start in consistent state
+ upper_tor_host.shell("ip neigh flush all")
+ lower_tor_host.shell("ip neigh flush all")
+
+ # Run tests with upper ToR active
+ toggle_simulator_port_to_upper_tor(tor_mux_intf)
+
+ try:
+ pytest_assert(check_simulator_read_side(tor_mux_intf) == 1)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Unable to switch active link to upper ToR'
+ return results
+
+ # Ping from both ToRs, expect only message from upper ToR to reach PTF
+ upper_tor_host.shell(ping_cmd.format(tor_mux_intf, upper_tor_ping_tgt_ip))
+ try:
+ testutils.verify_packet(ptfadapter, upper_tor_exp_pkt, ptf_port_index)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from active upper ToR not received'
+ return results
+
+ lower_tor_host.shell(ping_cmd.format(tor_mux_intf, lower_tor_ping_tgt_ip))
+ try:
+ testutils.verify_no_packet(ptfadapter, lower_tor_exp_pkt, ptf_port_index)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from standby lower ToR received'
+ return results
+
+ # Send dummy ARP packets from PTF to ToR. Ensure that ARP is learned on both ToRs
+ upper_tor_host.shell("ip neigh flush all")
+ lower_tor_host.shell("ip neigh flush all")
+ testutils.send_packet(ptfadapter, ptf_port_index, ptf_arp_pkt)
+
+ upper_tor_arp_table = upper_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
+ lower_tor_arp_table = lower_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
+ try:
+ pytest_assert(ptf_arp_tgt_ip in upper_tor_arp_table)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from PTF not received on active upper ToR'
+ return results
+
+ try:
+ pytest_assert(ptf_arp_tgt_ip in lower_tor_arp_table)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from PTF not received on standby lower ToR'
+ return results
+
+ # Repeat all tests with lower ToR active
+ toggle_simulator_port_to_lower_tor(tor_mux_intf)
+ try:
+ pytest_assert(check_simulator_read_side(tor_mux_intf) == 2)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Unable to switch active link to lower ToR'
+ return results
+
+ lower_tor_host.shell(ping_cmd.format(tor_mux_intf, lower_tor_ping_tgt_ip))
+ try:
+ testutils.verify_packet(ptfadapter, lower_tor_exp_pkt, ptf_port_index)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from active lower ToR not received'
+ return results
+
+ upper_tor_host.shell(ping_cmd.format(tor_mux_intf, upper_tor_ping_tgt_ip))
+ try:
+ testutils.verify_no_packet(ptfadapter, upper_tor_exp_pkt, ptf_port_index)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from standby upper ToR received'
+ return results
+
+ upper_tor_host.shell("ip neigh flush all")
+ lower_tor_host.shell("ip neigh flush all")
+ testutils.send_packet(ptfadapter, ptf_port_index, ptf_arp_pkt)
+
+ upper_tor_arp_table = upper_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
+ lower_tor_arp_table = lower_tor_host.switch_arptable()['ansible_facts']['arptable']['v4']
+ try:
+ pytest_assert(ptf_arp_tgt_ip in upper_tor_arp_table)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from PTF not received on standby upper ToR'
+ return results
+
+ try:
+ pytest_assert(ptf_arp_tgt_ip in lower_tor_arp_table)
+ except AssertionError:
+ results['failed'] = True
+ results['failed_reason'] = 'Packet from PTF not received on active lower ToR'
+ return results
+
+ logger.info('Finished mux simulator check')
+ return results
+ return _check
+
+
@pytest.fixture(scope="module")
def check_monit(duthosts):
"""
@@ -285,7 +454,7 @@ def check_monit(duthosts):
in the correct status or not.
@return: A dictionary contains the testing result (failed or not failed) and the status of each service.
"""
- def _check():
+ def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking status of each Monit service...")
@@ -343,7 +512,7 @@ def _check():
@pytest.fixture(scope="module")
def check_processes(duthosts):
- def _check():
+ def _check(*args, **kwargs):
check_results = []
for dut in duthosts:
logger.info("Checking process status on %s..." % dut.hostname)
diff --git a/tests/common/plugins/sanity_check/constants.py b/tests/common/plugins/sanity_check/constants.py
index 4daa1c74825..173a7645da5 100644
--- a/tests/common/plugins/sanity_check/constants.py
+++ b/tests/common/plugins/sanity_check/constants.py
@@ -20,3 +20,6 @@
"fast_reboot": {"cmd": "fast_reboot", "reboot": True, "adaptive": False, 'recover_wait': 120},
"adaptive": {"cmd": None, "reboot": False, "adaptive": True, 'recover_wait': 30},
} # All supported recover methods
+
+STAGE_PRE_TEST = 'stage_pre_test'
+STAGE_POST_TEST = 'stage_post_test'
diff --git a/tests/common/reboot.py b/tests/common/reboot.py
index 09183f7cb6c..1e9207db3c8 100644
--- a/tests/common/reboot.py
+++ b/tests/common/reboot.py
@@ -12,6 +12,7 @@
REBOOT_TYPE_WARM = "warm"
REBOOT_TYPE_COLD = "cold"
+REBOOT_TYPE_SOFT = "soft"
REBOOT_TYPE_FAST = "fast"
REBOOT_TYPE_POWEROFF = "power off"
REBOOT_TYPE_WATCHDOG = "watchdog"
@@ -44,6 +45,13 @@
"cause": r"'reboot'|Non-Hardware \(reboot",
"test_reboot_cause_only": False
},
+ REBOOT_TYPE_SOFT: {
+ "command": "soft-reboot",
+ "timeout": 300,
+ "wait": 120,
+ "cause": "soft-reboot",
+ "test_reboot_cause_only": False
+ },
REBOOT_TYPE_FAST: {
"command": "fast-reboot",
"timeout": 180,
@@ -161,7 +169,7 @@ def execute_reboot_helper():
logger.info('waiting for warmboot-finalizer service to become activating')
finalizer_state = get_warmboot_finalizer_state(duthost)
while finalizer_state != 'activating':
- dut_datetime_after_ssh = duthost.get_up_time()
+ dut_datetime_after_ssh = duthost.get_now_time()
time_passed = float(dut_datetime_after_ssh.strftime("%s")) - float(dut_datetime.strftime("%s"))
if time_passed > wait:
raise Exception('warmboot-finalizer never reached state "activating"')
diff --git a/tests/common/system_utils/docker.py b/tests/common/system_utils/docker.py
index d088a00702b..bd8c347849a 100644
--- a/tests/common/system_utils/docker.py
+++ b/tests/common/system_utils/docker.py
@@ -56,17 +56,6 @@ def load_docker_registry_info(duthost, creds):
return DockerRegistryInfo(host, username, password)
-def delete_container(duthost, container_name):
- """Attempts to delete the specified container from the duthost.
-
- Args:
- duthost (SonicHost): The target device.
- container_name (str): The name of the container to delete.
- """
- duthost.command("docker stop {}".format(container_name), module_ignore_errors=True)
- duthost.command("docker rm {}".format(container_name), module_ignore_errors=True)
-
-
def download_image(duthost, registry, image_name, image_version="latest"):
"""Attempts to download the specified image from the registry.
@@ -191,8 +180,8 @@ def restore_default_syncd(duthost, creds):
def _perform_swap_syncd_shutdown_check(duthost):
def ready_for_swap():
if any([
- duthost.is_container_present("syncd"),
- duthost.is_container_present("swss"),
+ duthost.is_container_running("syncd"),
+ duthost.is_container_running("swss"),
not duthost.is_bgp_state_idle()
]):
return False
diff --git a/tests/common/templates/pfc_storm_onyx.j2 b/tests/common/templates/pfc_storm_onyx.j2
index 89902595a0f..7b61d39e4a8 100644
--- a/tests/common/templates/pfc_storm_onyx.j2
+++ b/tests/common/templates/pfc_storm_onyx.j2
@@ -6,9 +6,9 @@ configure terminal
docker exec {{ container_name }} /bin/bash
cd /root/
{% if (pfc_asym is defined) and (pfc_asym == True) %}
-{% if pfc_storm_defer_time is defined %} sleep {{pfc_storm_defer_time}} &&{% endif %} nohup python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "") | replace("/", "_")}} &
+{% if pfc_storm_defer_time is defined %} sleep {{pfc_storm_defer_time}} &&{% endif %} nohup python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "sl1p") | replace("/", "sp")}} &
{% else %}
-{% if pfc_storm_defer_time is defined %} sleep {{pfc_storm_defer_time}} &&{% endif %} nohup python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "") | replace("/", "_")}} -r {{ansible_eth0_ipv4_addr}} &
+{% if pfc_storm_defer_time is defined %} sleep {{pfc_storm_defer_time}} &&{% endif %} nohup python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "sl1p") | replace("/", "sp")}} -r {{ansible_eth0_ipv4_addr}} &
{% endif %}
exit
diff --git a/tests/common/templates/pfc_storm_stop_onyx.j2 b/tests/common/templates/pfc_storm_stop_onyx.j2
index b04ddaedd66..f179371899d 100644
--- a/tests/common/templates/pfc_storm_stop_onyx.j2
+++ b/tests/common/templates/pfc_storm_stop_onyx.j2
@@ -6,9 +6,9 @@ docker exec {{ container_name }} /bin/bash
cd /root/
{% if (pfc_asym is defined) and (pfc_asym == True) %}
-{% if pfc_storm_stop_defer_time is defined %} sleep {{pfc_storm_stop_defer_time}} &&{% endif %} pkill -f "python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "") | replace("/", "_")}}" {% if pfc_storm_stop_defer_time is defined %}&{% endif %}
+{% if pfc_storm_stop_defer_time is defined %} sleep {{pfc_storm_stop_defer_time}} &&{% endif %} pkill -f "python {{pfc_gen_file}} -p {{pfc_queue_index}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "sl1p") | replace("/", "sp")}}" {% if pfc_storm_stop_defer_time is defined %}&{% endif %}
{% else %}
-{% if pfc_storm_stop_defer_time is defined %} sleep {{pfc_storm_stop_defer_time}} &&{% endif %} pkill -f "python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "") | replace("/", "_")}} -r {{ansible_eth0_ipv4_addr}}" {% if pfc_storm_stop_defer_time is defined %}&{% endif %}
+{% if pfc_storm_stop_defer_time is defined %} sleep {{pfc_storm_stop_defer_time}} &&{% endif %} pkill -f "python {{pfc_gen_file}} -p {{(1).__lshift__(pfc_queue_index)}} -t 65535 -n {{pfc_frames_number}} -i {{pfc_fanout_interface | replace("ernet 1/", "sl1p") | replace("/", "sp")}} -r {{ansible_eth0_ipv4_addr}}" {% if pfc_storm_stop_defer_time is defined %}&{% endif %}
{% endif %}
exit
diff --git a/tests/common/testbed.py b/tests/common/testbed.py
index 83bab7cecb3..55c45de7b91 100644
--- a/tests/common/testbed.py
+++ b/tests/common/testbed.py
@@ -21,7 +21,8 @@
class TestbedInfo(object):
"""Parse the testbed file used to describe whole testbed info."""
- TESTBED_FIELDS = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_DEPRECATED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'comment')
+ TESTBED_FIELDS_RECOMMENDED = ('conf-name', 'group-name', 'topo', 'ptf_image_name', 'ptf', 'ptf_ip', 'ptf_ipv6', 'server', 'vm_base', 'dut', 'inv_name', 'auto_recover', 'comment')
def __init__(self, testbed_file):
if testbed_file.endswith(".csv"):
@@ -69,13 +70,18 @@ def _ip_mask_to_cidr(self, ip_address, netmask):
def _read_testbed_topo_from_csv(self):
"""Read csv testbed info file."""
with open(self.testbed_filename) as f:
- topo = csv.DictReader(f, fieldnames=self.TESTBED_FIELDS,
- delimiter=',')
+ header = [field.strip(' #') for field in f.readline().strip().split(',')]
+ print(header)
+ if len(header) == len(self.TESTBED_FIELDS_DEPRECATED):
+ self.testbed_fields = self.TESTBED_FIELDS_DEPRECATED
+ elif len(header) == len(self.TESTBED_FIELDS_RECOMMENDED):
+ self.testbed_fields = self.TESTBED_FIELDS_RECOMMENDED
+ else:
+ raise ValueError('Unsupported testbed fields %s' % str(header))
+ for header_field, expect_field in zip(header, self.testbed_fields):
+ assert header_field == expect_field
- # Validate all field are in the same order and are present
- header = next(topo)
- for field in self.TESTBED_FIELDS:
- assert header[field].replace('#', '').strip() == field
+ topo = csv.DictReader(f, fieldnames=self.testbed_fields, delimiter=',')
for line in topo:
if line['conf-name'].lstrip().startswith('#'):
@@ -89,7 +95,7 @@ def _read_testbed_topo_from_csv(self):
self._cidr_to_ip_mask(line['ptf_ipv6'])
line['duts'] = line['dut'].translate(string.maketrans("", ""), "[] ").split(';')
- line['duts_map'] = {dut:line['duts'].index(dut) for dut in line['duts']}
+ line['duts_map'] = {dut: line['duts'].index(dut) for dut in line['duts']}
del line['dut']
self.testbed_topo[line['conf-name']] = line
@@ -135,6 +141,7 @@ class IncIndentDumper(yaml.Dumper):
[1]: https://web.archive.org/web/20170903201521/https://pyyaml.org/ticket/64
[2]: https://github.com/yaml/pyyaml/issues/127
"""
+
def increase_indent(self, flow=False, indentless=False):
return yaml.Dumper.increase_indent(self, flow, False)
@@ -153,7 +160,7 @@ def write_line_break(self, data=None):
ptf_ipv6 = self._ip_mask_to_cidr(tb_dict["ptf_ipv6"],
tb_dict["ptf_netmask_v6"])
testbed_mapping = zip(
- self.TESTBED_FIELDS,
+ self.testbed_fields,
[
tb_name,
tb_dict["group-name"],
diff --git a/tests/common/utilities.py b/tests/common/utilities.py
index 90e34470e08..7cb903aaafb 100644
--- a/tests/common/utilities.py
+++ b/tests/common/utilities.py
@@ -2,6 +2,8 @@
Utility functions can re-used in testing scripts.
"""
import collections
+import inspect
+import ipaddress
import logging
import six
import sys
@@ -13,6 +15,7 @@
from ansible.inventory.manager import InventoryManager
from ansible.vars.manager import VariableManager
+from tests.common.cache import cached
from tests.common.cache import FactsCache
logger = logging.getLogger(__name__)
@@ -161,7 +164,48 @@ def get_inventory_files(request):
return inv_files
-def get_host_vars(inv_files, hostname, variable=None):
+def _get_parameter(function, func_args, func_kargs, argname):
+ """Get the parameter passed as argname to function."""
+ args_binding = inspect.getcallargs(function, *func_args, **func_kargs)
+ return args_binding.get(argname) or args_binding.get("kargs").get(argname)
+
+
+def zone_getter_factory(argname):
+ """Create zone getter function used to retrieve parameter as zone."""
+
+ def _zone_getter(function, func_args, func_kargs):
+ param = _get_parameter(function, func_args, func_kargs, argname)
+ if param is None:
+ raise ValueError("Failed to get parameter '%s' from function %s as zone." % (argname, function))
+ return param
+
+ return _zone_getter
+
+
+def _check_inv_files_after_read(facts, function, func_args, func_kargs):
+ """Check if inventory file matches after read host variable from cached files."""
+ if facts is not FactsCache.NOTEXIST:
+ inv_files = _get_parameter(function, func_args, func_kargs, "inv_files")
+ if inv_files == facts["inv_files"]:
+ return facts["vars"]
+ # no facts cached or facts not in the same inventory, return `NOTEXIST`
+ # to force calling the decorated function to get facts
+ return FactsCache.NOTEXIST
+
+
+def _mark_inv_files_before_write(facts, function, func_args, func_kargs):
+ """Add inventory to the facts before write to cached file."""
+ inv_files = _get_parameter(function, func_args, func_kargs, "inv_files")
+ return {"inv_files": inv_files, "vars": facts}
+
+
+@cached(
+ "host_vars",
+ zone_getter=zone_getter_factory("hostname"),
+ after_read=_check_inv_files_after_read,
+ before_write=_mark_inv_files_before_write
+)
+def get_host_vars(inv_files, hostname):
"""Use ansible's InventoryManager to get value of variables defined for the specified host in the specified
inventory files.
@@ -169,32 +213,25 @@ def get_host_vars(inv_files, hostname, variable=None):
inv_files (list or string): List of inventory file pathes, or string of a single inventory file path. In tests,
it can be get from request.config.getoption("ansible_inventory").
hostname (string): Hostname
- variable (string or None): Variable name. Defaults to None.
Returns:
- string or dict or None: If variable name is specified, return the variable value. If variable is not found,
- return None. If variable name is not specified, return all variables in a dictionary. If the host is not
- found, return None.
+ dict or None: dict if the host is found, None if the host is not found.
"""
- cached_vars = cache.read(hostname, 'host_vars')
- if cached_vars and cached_vars['inv_files'] == inv_files:
- host_vars = cached_vars['vars']
- else:
- im = get_inventory_manager(inv_files)
- host = im.get_host(hostname)
- if not host:
- logger.error("Unable to find host {} in {}".format(hostname, str(inv_files)))
- return None
- host_vars = host.vars
- cache.write(hostname, 'host_vars', {'inv_files': inv_files, 'vars': host_vars})
-
- if variable:
- return host_vars.get(variable, None)
- else:
- return host_vars
+ im = get_inventory_manager(inv_files)
+ host = im.get_host(hostname)
+ if not host:
+ logger.error("Unable to find host {} in {}".format(hostname, str(inv_files)))
+ return None
+ return host.vars.copy()
-def get_host_visible_vars(inv_files, hostname, variable=None):
+@cached(
+ "host_visible_vars",
+ zone_getter=zone_getter_factory("hostname"),
+ after_read=_check_inv_files_after_read,
+ before_write=_mark_inv_files_before_write
+)
+def get_host_visible_vars(inv_files, hostname):
"""Use ansible's VariableManager and InventoryManager to get value of variables visible to the specified host.
The variable could be defined in host_vars or in group_vars that the host belongs to.
@@ -202,35 +239,26 @@ def get_host_visible_vars(inv_files, hostname, variable=None):
inv_files (list or string): List of inventory file pathes, or string of a single inventory file path. In tests,
it can be get from request.config.getoption("ansible_inventory").
hostname (string): Hostname
- variable (string or None): Variable name. Defaults to None.
Returns:
- string or dict or None: If variable name is specified, return the variable value. If variable is not found,
- return None. If variable name is not specified, return all variables in a dictionary. If the host is not
- found, return None.
+ dict or None: dict if the host is found, None if the host is not found.
"""
- cached_vars = cache.read(hostname, 'host_visible_vars')
-
- if cached_vars and cached_vars['inv_files'] == inv_files:
- host_visible_vars = cached_vars['vars']
- else:
- vm = get_variable_manager(inv_files)
- im = vm._inventory
- host = im.get_host(hostname)
- if not host:
- logger.error("Unable to find host {} in {}".format(hostname, str(inv_files)))
- return None
-
- host_visible_vars = vm.get_vars(host=host)
- cache.write(hostname, 'host_visible_vars', {'inv_files': inv_files, 'vars': host_visible_vars})
-
- if variable:
- return host_visible_vars.get(variable, None)
- else:
- return host_visible_vars
+ vm = get_variable_manager(inv_files)
+ im = vm._inventory
+ host = im.get_host(hostname)
+ if not host:
+ logger.error("Unable to find host {} in {}".format(hostname, str(inv_files)))
+ return None
+ return vm.get_vars(host=host)
-def get_group_visible_vars(inv_files, group_name, variable=None):
+@cached(
+ "group_visible_vars",
+ zone_getter=zone_getter_factory("group_name"),
+ after_read=_check_inv_files_after_read,
+ before_write=_mark_inv_files_before_write
+)
+def get_group_visible_vars(inv_files, group_name):
"""Use ansible's VariableManager and InventoryManager to get value of variables visible to the first host belongs
to the specified group. The variable could be defined in host_vars of the first host or in group_vars that the host
belongs to.
@@ -239,39 +267,45 @@ def get_group_visible_vars(inv_files, group_name, variable=None):
inv_files (list or string): List of inventory file pathes, or string of a single inventory file path. In tests,
it can be get from request.config.getoption("ansible_inventory").
group_name (string): Name of group in ansible inventory.
- variable (string or None): Variable name. Defaults to None.
Returns:
- string or dict or None: If variable name is specified, return the variable value. If variable is not found,
- return None. If variable name is not specified, return all variables in a dictionary. If the group is not
- found or there is no host in the group, return None.
+ dict or None: dict if the host is found, None if the host is not found.
"""
- cached_vars = cache.read(group_name, 'group_visible_vars')
- if cached_vars and cached_vars['inv_files'] == inv_files:
- group_visible_vars = cached_vars['vars']
- else:
- vm = get_variable_manager(inv_files)
- im = vm._inventory
- group = im.groups.get(group_name, None)
- if not group:
- logger.error("Unable to find group {} in {}".format(group_name, str(inv_files)))
- return None
- group_hosts = group.get_hosts()
- if len(group_hosts) == 0:
- logger.error("No host in group {}".format(group_name))
- return None
- first_host = group_hosts[0]
-
- group_visible_vars = vm.get_vars(host=first_host)
- cache.write(group_name, 'group_visible_vars', {'inv_files': inv_files, 'vars': group_visible_vars})
-
- if variable:
- return group_visible_vars.get(variable, None)
- else:
- return group_visible_vars
+ vm = get_variable_manager(inv_files)
+ im = vm._inventory
+ group = im.groups.get(group_name, None)
+ if not group:
+ logger.error("Unable to find group {} in {}".format(group_name, str(inv_files)))
+ return None
+ group_hosts = group.get_hosts()
+ if len(group_hosts) == 0:
+ logger.error("No host in group {}".format(group_name))
+ return None
+ first_host = group_hosts[0]
+ return vm.get_vars(host=first_host)
-def get_test_server_vars(inv_files, server, variable=None):
+def get_test_server_host(inv_files, server):
+ """Get test server ansible host from the 'server' column in testbed file."""
+ vm = get_variable_manager(inv_files)
+ im = vm._inventory
+ group = im.groups.get(server, None)
+ if not group:
+ logger.error("Unable to find group {} in {}".format(server, str(inv_files)))
+ return None
+ for host in group.get_hosts():
+ if not re.match(r'VM\d+', host.name): # This must be the test server host
+ return host
+ return None
+
+
+@cached(
+ "test_server_vars",
+ zone_getter=zone_getter_factory("server"),
+ after_read=_check_inv_files_after_read,
+ before_write=_mark_inv_files_before_write
+)
+def get_test_server_vars(inv_files, server):
"""Use ansible's VariableManager and InventoryManager to get value of variables of test server belong to specified
server group.
@@ -284,35 +318,53 @@ def get_test_server_vars(inv_files, server, variable=None):
inv_files (list or string): List of inventory file pathes, or string of a single inventory file path. In tests,
it can be get from request.config.getoption("ansible_inventory").
server (string): Server of test setup in testbed.csv file.
- variable (string or None): Variable name. Defaults to None.
Returns:
- string or dict or None: If variable name is specified, return the variable value. If variable is not found,
- return None. If variable name is not specified, return all variables in a dictionary. If the server group
- is not found or there is no test server host in the group, return None.
+ dict or None: dict if the host is found, None if the host is not found.
"""
- cached_vars = cache.read(server, 'test_server_vars')
- if cached_vars and cached_vars['inv_files'] == inv_files:
- test_server_vars = cached_vars['vars']
- else:
- test_server_vars = None
-
- vm = get_variable_manager(inv_files)
- im = vm._inventory
- group = im.groups.get(server, None)
- if not group:
- logger.error("Unable to find group {} in {}".format(server, str(inv_files)))
- return None
- for host in group.get_hosts():
- if not re.match(r'VM\d+', host.name): # This must be the test server host
- test_server_vars = host.vars
- cache.write(server, 'test_server_vars', {'inv_files': inv_files, 'vars': test_server_vars})
-
- if test_server_vars:
- if variable:
- return test_server_vars.get(variable, None)
- else:
- return test_server_vars
- else:
+ host = get_test_server_host(inv_files, server)
+ if not host:
logger.error("Unable to find test server host under group {}".format(server))
return None
+ return host.vars.copy()
+
+
+@cached(
+ "test_server_visible_vars",
+ zone_getter=zone_getter_factory("server"),
+ after_read=_check_inv_files_after_read,
+ before_write=_mark_inv_files_before_write
+)
+def get_test_server_visible_vars(inv_files, server):
+ """Use ansible's VariableManager and InventoryManager to get value of variables visible to the specified server
+ group.
+
+ In testbed.csv file, we can get the server name of each test setup under the 'server' column. For example
+ 'server_1', 'server_2', etc. This server name is indeed a group name in used ansible inventory files. This group
+ contains children groups for test server and VMs. This function is try to just return the variables visible to
+ the server group.
+
+ Args:
+ inv_files (list or string): List of inventory file pathes, or string of a single inventory file path. In tests,
+ it can be get from request.config.getoption("ansible_inventory").
+ server (string): Server of test setup in testbed.csv file.
+
+ Returns:
+ dict or None: dict if the host is found, None if the host is not found.
+ """
+ test_server_host = get_test_server_host(inv_files, server)
+ vm = get_variable_manager(inv_files)
+ if not test_server_host:
+ logger.error("Unable to find host %s in %s", test_server_host, inv_files)
+ return None
+
+ return vm.get_vars(host=test_server_host)
+
+
+def is_ipv4_address(ip_address):
+ """Check if ip address is ipv4."""
+ try:
+ ipaddress.IPv4Address(ip_address)
+ return True
+ except ipaddress.AddressValueError:
+ return False
diff --git a/tests/conftest.py b/tests/conftest.py
index 739bf6be6d8..cde75aa2cf3 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -12,19 +12,26 @@
from datetime import datetime
from tests.common.fixtures.conn_graph_facts import conn_graph_facts
-from tests.common.devices import Localhost
-from tests.common.devices import PTFHost, EosHost, FanoutHost, K8sMasterHost, K8sMasterCluster
+from tests.common.devices.local import Localhost
+from tests.common.devices.ptf import PTFHost
+from tests.common.devices.eos import EosHost
+from tests.common.devices.fanout import FanoutHost
+from tests.common.devices.k8s import K8sMasterHost
+from tests.common.devices.k8s import K8sMasterCluster
+from tests.common.devices.duthosts import DutHosts
+from tests.common.devices.vmhost import VMHost
+
from tests.common.helpers.constants import ASIC_PARAM_TYPE_ALL, ASIC_PARAM_TYPE_FRONTEND, DEFAULT_ASIC_ID
from tests.common.helpers.dut_ports import encode_dut_port_name
-from tests.common.devices import DutHosts
from tests.common.testbed import TestbedInfo
from tests.common.utilities import get_inventory_files
from tests.common.utilities import get_host_vars
from tests.common.utilities import get_host_visible_vars
+from tests.common.utilities import get_test_server_host
from tests.common.helpers.dut_utils import is_supervisor_node, is_frontend_node
from tests.common.cache import FactsCache
-from tests.common.connections import ConsoleHost
+from tests.common.connections.console_host import ConsoleHost
logger = logging.getLogger(__name__)
@@ -41,6 +48,7 @@
'tests.common.plugins.test_completeness',
'tests.common.plugins.log_section_start',
'tests.common.plugins.custom_fixtures',
+ 'tests.common.dualtor',
'tests.vxlan')
@@ -85,6 +93,10 @@ def pytest_addoption(parser):
help="Allow recovery attempt in sanity check in case of failure")
parser.addoption("--check_items", action="store", default=False,
help="Change (add|remove) check items in the check list")
+ parser.addoption("--post_check", action="store_true", default=False,
+ help="Perform post test sanity check if sanity check is enabled")
+ parser.addoption("--post_check_items", action="store", default=False,
+ help="Change (add|remove) post test check items based on pre test check items")
########################
# pre-test options #
@@ -154,7 +166,7 @@ def get_tbinfo(request):
raise ValueError("testbed and testbed_file are required!")
testbedinfo = cache.read(tbname, 'tbinfo')
- if not testbedinfo:
+ if testbedinfo is cache.NOTEXIST:
testbedinfo = TestbedInfo(tbfile)
cache.write(tbname, 'tbinfo', testbedinfo)
@@ -199,6 +211,7 @@ def duthost(duthosts, request):
return duthost
+
@pytest.fixture(scope="module")
def rand_one_dut_hostname(request):
"""
@@ -209,6 +222,27 @@ def rand_one_dut_hostname(request):
return dut_hostnames[0]
+@pytest.fixture(scope="module")
+def rand_selected_dut(duthosts, rand_one_dut_hostname):
+ """
+ Return the randomly selected duthost
+ """
+ return duthosts[rand_one_dut_hostname]
+
+
+@pytest.fixture(scope="module")
+def rand_unselected_dut(request, duthosts, rand_one_dut_hostname):
+ """
+ Return the left duthost after random selection.
+ Return None for non dualtor testbed
+ """
+ dut_hostnames = generate_params_dut_hostname(request)
+ if len(dut_hostnames) <= 1:
+ return None
+ idx = dut_hostnames.index(rand_one_dut_hostname)
+ return duthosts[dut_hostnames[1 - idx]]
+
+
@pytest.fixture(scope="module")
def rand_one_dut_portname_oper_up(request):
oper_up_ports = generate_port_lists(request, "oper_up_ports")
@@ -346,6 +380,15 @@ def fanouthosts(ansible_adhoc, conn_graph_facts, creds):
pass
return fanout_hosts
+
+@pytest.fixture(scope="session")
+def vmhost(ansible_adhoc, request, tbinfo):
+ server = tbinfo["server"]
+ inv_files = request.config.option.ansible_inventory
+ vmhost = get_test_server_host(inv_files, server)
+ return VMHost(ansible_adhoc, vmhost.name)
+
+
@pytest.fixture(scope='session')
def eos():
""" read and yield eos configuration """
@@ -658,29 +701,32 @@ def generate_params_supervisor_hostname(request):
# Expecting only a single supervisor node
if is_supervisor_node(inv_files, dut):
return [dut]
- pytest.fail("Test selected require a supervisor node, " +
- "none of the DUTs '{}' in testbed '{}' are a supervisor node".format(duts, tbname))
+ # If there are no supervisor cards in a multi-dut testbed, we are dealing with all pizza boxes in the testbed, pick the first DUT
+ return [duts[0]]
-def generate_param_asic_index(request, dut_indices, param_type):
+def generate_param_asic_index(request, dut_hostnames, param_type, random_asic=False):
_, tbinfo = get_tbinfo(request)
inv_files = get_inventory_files(request)
- logging.info("generating {} asic indicies for DUT [{}] in ".format(param_type, dut_indices))
- #if the params are not present treat the device as a single asic device
- asic_index_params = [DEFAULT_ASIC_ID]
+ logging.info("generating {} asic indicies for DUT [{}] in ".format(param_type, dut_hostnames))
- for dut_id in dut_indices:
- dut = tbinfo['duts'][dut_id]
+ asic_index_params = []
+ for dut in dut_hostnames:
inv_data = get_host_visible_vars(inv_files, dut)
- if inv_data is not None:
+ # if the params are not present treat the device as a single asic device
+ dut_asic_params = [DEFAULT_ASIC_ID]
+ if inv_data:
if param_type == ASIC_PARAM_TYPE_ALL and ASIC_PARAM_TYPE_ALL in inv_data:
if int(inv_data[ASIC_PARAM_TYPE_ALL]) == 1:
- asic_index_params = [DEFAULT_ASIC_ID]
+ dut_asic_params = [DEFAULT_ASIC_ID]
else:
- asic_index_params = range(int(inv_data[ASIC_PARAM_TYPE_ALL]))
+ dut_asic_params = range(int(inv_data[ASIC_PARAM_TYPE_ALL]))
elif param_type == ASIC_PARAM_TYPE_FRONTEND and ASIC_PARAM_TYPE_FRONTEND in inv_data:
- asic_index_params = inv_data[ASIC_PARAM_TYPE_FRONTEND]
- logging.info("dut_index {} dut name {} asics params = {}".format(
- dut_id, dut, asic_index_params))
+ dut_asic_params = inv_data[ASIC_PARAM_TYPE_FRONTEND]
+ logging.info("dut name {} asics params = {}".format(dut, dut_asic_params))
+ if random_asic:
+ asic_index_params.append(random.sample(dut_asic_params, 1))
+ else:
+ asic_index_params.append(dut_asic_params)
return asic_index_params
@@ -805,44 +851,69 @@ def generate_priority_lists(request, prio_scope):
_hosts_per_hwsku_per_module = {}
def pytest_generate_tests(metafunc):
# The topology always has atleast 1 dut
- dut_indices = [0]
+ dut_fixture_name = None
+ duts_selected = None
global _frontend_hosts_per_hwsku_per_module, _hosts_per_hwsku_per_module
- # Enumerators ("enum_dut_index", "enum_dut_hostname", "rand_one_dut_hostname") are mutually exclusive
- if "enum_dut_index" in metafunc.fixturenames:
- dut_indices = generate_params_dut_index(metafunc)
- metafunc.parametrize("enum_dut_index", dut_indices, scope="module")
- elif "enum_dut_hostname" in metafunc.fixturenames:
- dut_hostnames = generate_params_dut_hostname(metafunc)
- metafunc.parametrize("enum_dut_hostname", dut_hostnames, scope="module")
+ # Enumerators for duts are mutually exclusive
+ if "enum_dut_hostname" in metafunc.fixturenames:
+ duts_selected = generate_params_dut_hostname(metafunc)
+ dut_fixture_name = "enum_dut_hostname"
elif "enum_supervisor_dut_hostname" in metafunc.fixturenames:
- supervisor_hosts = generate_params_supervisor_hostname(metafunc)
- metafunc.parametrize("enum_supervisor_dut_hostname", supervisor_hosts, scope="module")
+ duts_selected = generate_params_supervisor_hostname(metafunc)
+ dut_fixture_name = "enum_supervisor_dut_hostname"
elif "enum_frontend_dut_hostname" in metafunc.fixturenames:
- frontend_hosts = generate_params_frontend_hostname(metafunc)
- metafunc.parametrize("enum_frontend_dut_hostname", frontend_hosts, scope="module")
+ duts_selected = generate_params_frontend_hostname(metafunc)
+ dut_fixture_name = "enum_frontend_dut_hostname"
elif "enum_rand_one_per_hwsku_hostname" in metafunc.fixturenames:
if metafunc.module not in _hosts_per_hwsku_per_module:
hosts_per_hwsku = generate_params_hostname_rand_per_hwsku(metafunc)
_hosts_per_hwsku_per_module[metafunc.module] = hosts_per_hwsku
- hosts = _hosts_per_hwsku_per_module[metafunc.module]
- metafunc.parametrize("enum_rand_one_per_hwsku_hostname", hosts, scope="module")
+ duts_selected = _hosts_per_hwsku_per_module[metafunc.module]
+ dut_fixture_name = "enum_rand_one_per_hwsku_hostname"
elif "enum_rand_one_per_hwsku_frontend_hostname" in metafunc.fixturenames:
if metafunc.module not in _frontend_hosts_per_hwsku_per_module:
hosts_per_hwsku = generate_params_hostname_rand_per_hwsku(metafunc, frontend_only=True)
_frontend_hosts_per_hwsku_per_module[metafunc.module] = hosts_per_hwsku
- hosts = _frontend_hosts_per_hwsku_per_module[metafunc.module]
- metafunc.parametrize("enum_rand_one_per_hwsku_frontend_hostname", hosts, scope="module")
+ duts_selected = _frontend_hosts_per_hwsku_per_module[metafunc.module]
+ dut_fixture_name = "enum_rand_one_per_hwsku_frontend_hostname"
+ asics_selected = None
+ asic_fixture_name = None
if "enum_asic_index" in metafunc.fixturenames:
- metafunc.parametrize("enum_asic_index", generate_param_asic_index(metafunc, dut_indices, ASIC_PARAM_TYPE_ALL))
- if "enum_frontend_asic_index" in metafunc.fixturenames:
- metafunc.parametrize(
- "enum_frontend_asic_index",
- generate_param_asic_index(
- metafunc, dut_indices, ASIC_PARAM_TYPE_FRONTEND
- ),
- scope="class"
- )
+ if duts_selected is None:
+ tbname, tbinfo = get_tbinfo(metafunc)
+ duts_selected = [tbinfo["duts"][0]]
+ asic_fixture_name = "enum_asic_index"
+ asics_selected = generate_param_asic_index(metafunc, duts_selected, ASIC_PARAM_TYPE_ALL)
+ elif "enum_frontend_asic_index" in metafunc.fixturenames:
+ if duts_selected is None:
+ tbname, tbinfo = get_tbinfo(metafunc)
+ duts_selected = [tbinfo["duts"][0]]
+ asic_fixture_name = "enum_frontend_asic_index"
+ asics_selected = generate_param_asic_index(metafunc, duts_selected,ASIC_PARAM_TYPE_FRONTEND)
+ elif "enum_rand_one_asic_index" in metafunc.fixturenames:
+ if duts_selected is None:
+ tbname, tbinfo = get_tbinfo(metafunc)
+ duts_selected = [tbinfo["duts"][0]]
+ asic_fixture_name = "enum_rand_one_asic_index"
+ asics_selected = generate_param_asic_index(metafunc, duts_selected, ASIC_PARAM_TYPE_ALL, random_asic=True)
+
+ # Create parameterization tuple of dut_fixture_name and asic_fixture_name to parameterize
+ if dut_fixture_name and asic_fixture_name:
+ # parameterize on both - create tuple for each
+ tuple_list = []
+ for a_dut_index, a_dut in enumerate(duts_selected):
+ for a_asic in asics_selected[a_dut_index]:
+ # Create tuple of dut and asic index
+ tuple_list.append((a_dut, a_asic))
+ metafunc.parametrize(dut_fixture_name + "," + asic_fixture_name, tuple_list, scope="module")
+ elif dut_fixture_name:
+ # parameterize only on DUT
+ metafunc.parametrize(dut_fixture_name, duts_selected, scope="module")
+ elif asic_fixture_name:
+ # We have no duts selected, so need asic list for the first DUT
+ metafunc.parametrize(asic_fixture_name, asics_selected[0], scope="module")
+
if "enum_dut_portname" in metafunc.fixturenames:
metafunc.parametrize("enum_dut_portname", generate_port_lists(metafunc, "all_ports"))
if "enum_dut_portname_oper_up" in metafunc.fixturenames:
@@ -880,3 +951,19 @@ def duthost_console(localhost, creds, request):
console_password=creds['console_password'][vars['console_type']])
yield host
host.disconnect()
+
+@pytest.fixture(scope='session')
+def cleanup_cache_for_session(request):
+ """
+ This fixture allows developers to cleanup the cached data for all DUTs in the testbed before test.
+ Use cases:
+ - Running tests where some 'facts' about the DUT that get cached are changed.
+ - Running tests/regression without running test_pretest which has a test to clean up cache (PR#2978)
+ - Test case development phase to work out testbed information changes.
+
+ This fixture is not automatically applied, if you want to use it, you have to add a call to it in your tests.
+ """
+ tbname, tbinfo = get_tbinfo(request)
+ cache.cleanup(zone=tbname)
+ for a_dut in tbinfo['duts']:
+ cache.cleanup(zone=a_dut)
diff --git a/tests/copp/copp_utils.py b/tests/copp/copp_utils.py
index c4455d1817d..599b2bf1ba3 100644
--- a/tests/copp/copp_utils.py
+++ b/tests/copp/copp_utils.py
@@ -4,6 +4,7 @@
Todo:
Refactor ptfadapter so it can be leveraged in these test cases.
"""
+import re
DEFAULT_NN_TARGET_PORT = 3
@@ -12,20 +13,21 @@
_UPDATE_COPP_SCRIPT = "copp/scripts/update_copp_config.py"
_BASE_COPP_CONFIG = "/tmp/base_copp_config.json"
-_APP_DB_COPP_CONFIG = "swss:/etc/swss/config.d/00-copp.config.json"
+_APP_DB_COPP_CONFIG = ":/etc/swss/config.d/00-copp.config.json"
_CONFIG_DB_COPP_CONFIG = "/etc/sonic/copp_cfg.json"
_TEMP_COPP_CONFIG = "/tmp/copp_config.json"
_TEMP_COPP_TEMPLATE = "/tmp/copp.json.j2"
_COPP_TEMPLATE_PATH = "/usr/share/sonic/templates/copp.json.j2"
-_SWSS_COPP_TEMPLATE = "swss:" + _COPP_TEMPLATE_PATH
+_SWSS_COPP_TEMPLATE = ":" + _COPP_TEMPLATE_PATH
_PTF_NN_TEMPLATE = "templates/ptf_nn_agent.conf.ptf.j2"
_PTF_NN_DEST = "/etc/supervisor/conf.d/ptf_nn_agent.conf"
_SYNCD_NN_TEMPLATE = "templates/ptf_nn_agent.conf.dut.j2"
_SYNCD_NN_DEST = "/tmp/ptf_nn_agent.conf"
+_SYNCD_NN_FILE = "ptf_nn_agent.conf"
-def limit_policer(dut, pps_limit):
+def limit_policer(dut, pps_limit, nn_target_namespace):
"""
Updates the COPP configuration in the SWSS container to respect a given rate limit.
@@ -37,8 +39,12 @@ def limit_policer(dut, pps_limit):
pps_limit (int): The rate limit for COPP to enforce on ALL trap groups.
"""
+ asichost = dut.asic_instance_from_namespace(nn_target_namespace)
+
+ swss_docker_name = asichost.get_docker_name("swss")
+
if "201811" in dut.os_version or "201911" in dut.os_version:
- dut.command("docker cp {} {}".format(_APP_DB_COPP_CONFIG, _BASE_COPP_CONFIG))
+ dut.command("docker cp {} {}".format(swss_docker_name + _APP_DB_COPP_CONFIG, _BASE_COPP_CONFIG))
config_format = "app_db"
else:
dut.command("cp {} {}".format(_CONFIG_DB_COPP_CONFIG, _BASE_COPP_CONFIG))
@@ -53,17 +59,17 @@ def limit_policer(dut, pps_limit):
)
if config_format == "app_db":
- dut.command("docker cp {} {}".format(_TEMP_COPP_CONFIG, _APP_DB_COPP_CONFIG))
+ dut.command("docker cp {} {}".format(_TEMP_COPP_CONFIG, swss_docker_name + _APP_DB_COPP_CONFIG))
# As copp config is regenerated each time swss starts need to replace the template with
# config updated above. But before doing that need store the original template in a
# temporary file for restore after test.
- dut.command("docker cp {} {}".format(_SWSS_COPP_TEMPLATE, _TEMP_COPP_TEMPLATE))
- dut.command("docker cp {} {}".format(_TEMP_COPP_CONFIG, _SWSS_COPP_TEMPLATE))
+ dut.command("docker cp {} {}".format(swss_docker_name + _SWSS_COPP_TEMPLATE, _TEMP_COPP_TEMPLATE))
+ dut.command("docker cp {} {}".format(_TEMP_COPP_CONFIG, swss_docker_name + _SWSS_COPP_TEMPLATE))
else:
dut.command("cp {} {}".format(_TEMP_COPP_CONFIG, _CONFIG_DB_COPP_CONFIG))
-def restore_policer(dut):
+def restore_policer(dut, nn_target_namespace):
"""
Reloads the default COPP configuration in the SWSS container.
@@ -72,10 +78,14 @@ def restore_policer(dut):
The SWSS container must be restarted for the config change to take effect.
"""
+ asichost = dut.asic_instance_from_namespace(nn_target_namespace)
+
+ swss_docker_name = asichost.get_docker_name("swss")
+
# Restore the copp template in swss
if "201811" in dut.os_version or "201911" in dut.os_version:
- dut.command("docker cp {} {}".format(_BASE_COPP_CONFIG, _APP_DB_COPP_CONFIG))
- dut.command("docker cp {} {}".format(_TEMP_COPP_TEMPLATE, _SWSS_COPP_TEMPLATE))
+ dut.command("docker cp {} {}".format(_BASE_COPP_CONFIG, swss_docker_name + _APP_DB_COPP_CONFIG))
+ dut.command("docker cp {} {}".format(_TEMP_COPP_TEMPLATE, swss_docker_name + _SWSS_COPP_TEMPLATE))
else:
dut.command("cp {} {}".format(_BASE_COPP_CONFIG, _CONFIG_DB_COPP_CONFIG))
@@ -114,7 +124,7 @@ def restore_ptf(ptf):
ptf.supervisorctl(name="ptf_nn_agent", state="restarted")
-def configure_syncd(dut, nn_target_port, nn_target_interface, creds):
+def configure_syncd(dut, nn_target_port, nn_target_interface, nn_target_namespace, creds):
"""
Configures syncd to run the NN agent on the specified port.
@@ -125,22 +135,38 @@ def configure_syncd(dut, nn_target_port, nn_target_interface, creds):
Args:
dut (SonicHost): The target device.
nn_target_port (int): The port to run NN agent on.
- nn_target_interface (str): The Interface remote NN agents listens.
+ nn_target_interface (str): The Interface remote NN agents listen to
+ nn_target_namespace (str): The namespace in which the remote NN agents listen
creds (dict): Credential information according to the dut inventory
"""
facts = {"nn_target_port": nn_target_port, "nn_target_interface": nn_target_interface}
dut.host.options["variable_manager"].extra_vars.update(facts)
- _install_nano(dut, creds)
+ asichost = dut.asic_instance_from_namespace(nn_target_namespace)
+
+ syncd_docker_name = asichost.get_docker_name("syncd")
+
+ _install_nano(dut, creds, syncd_docker_name)
dut.template(src=_SYNCD_NN_TEMPLATE, dest=_SYNCD_NN_DEST)
- dut.command("docker cp {} syncd:/etc/supervisor/conf.d/".format(_SYNCD_NN_DEST))
- dut.command("docker exec syncd supervisorctl reread")
- dut.command("docker exec syncd supervisorctl update")
+ dut.command("docker cp {} {}:/etc/supervisor/conf.d/".format(_SYNCD_NN_DEST, syncd_docker_name))
+
+ dut.command("docker exec {} supervisorctl reread".format(syncd_docker_name))
+ dut.command("docker exec {} supervisorctl update".format(syncd_docker_name))
+
+def restore_syncd(dut, nn_target_namespace):
+ asichost = dut.asic_instance_from_namespace(nn_target_namespace)
-def _install_nano(dut, creds):
+ syncd_docker_name = asichost.get_docker_name("syncd")
+
+ dut.command("docker exec {} rm -rf /etc/supervisor/conf.d/{}".format(syncd_docker_name, _SYNCD_NN_FILE))
+ dut.command("docker exec {} supervisorctl reread".format(syncd_docker_name))
+ dut.command("docker exec {} supervisorctl update".format(syncd_docker_name))
+
+
+def _install_nano(dut, creds, syncd_docker_name):
"""
Install nanomsg package to syncd container.
@@ -149,13 +175,13 @@ def _install_nano(dut, creds):
creds (dict): Credential information according to the dut inventory
"""
- output = dut.command("docker exec syncd bash -c '[ -d /usr/local/include/nanomsg ] || echo copp'")
+ output = dut.command("docker exec {} bash -c '[ -d /usr/local/include/nanomsg ] || echo copp'".format(syncd_docker_name))
if output["stdout"] == "copp":
http_proxy = creds.get('proxy_env', {}).get('http_proxy', '')
https_proxy = creds.get('proxy_env', {}).get('https_proxy', '')
- cmd = '''docker exec -e http_proxy={} -e https_proxy={} syncd bash -c " \
+ cmd = '''docker exec -e http_proxy={} -e https_proxy={} {} bash -c " \
rm -rf /var/lib/apt/lists/* \
&& apt-get update \
&& apt-get install -y python-pip build-essential libssl-dev python-dev python-setuptools wget cmake \
@@ -165,7 +191,7 @@ def _install_nano(dut, creds):
&& rm -f 1.0.0.tar.gz && pip2 install cffi==1.7.0 && pip2 install --upgrade cffi==1.7.0 && pip2 install nnpy \
&& mkdir -p /opt && cd /opt && wget https://raw.githubusercontent.com/p4lang/ptf/master/ptf_nn/ptf_nn_agent.py \
&& mkdir ptf && cd ptf && wget https://raw.githubusercontent.com/p4lang/ptf/master/src/ptf/afpacket.py && touch __init__.py \
- " '''.format(http_proxy, https_proxy)
+ " '''.format(http_proxy, https_proxy, syncd_docker_name)
dut.command(cmd)
def _map_port_number_to_interface(dut, nn_target_port):
@@ -175,3 +201,14 @@ def _map_port_number_to_interface(dut, nn_target_port):
interfaces = dut.command("portstat")["stdout_lines"][2:]
return interfaces[nn_target_port].split()[0]
+
+def _get_http_and_https_proxy_ip(creds):
+ """
+ Get the http and https proxy ip.
+
+ Args:
+ creds (dict): Credential information according to the dut inventory
+ """
+
+ return (re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('http_proxy', ''))[0],
+ re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('https_proxy', ''))[0])
diff --git a/tests/copp/test_copp.py b/tests/copp/test_copp.py
index 10ec576af42..0cd3ee3db69 100644
--- a/tests/copp/test_copp.py
+++ b/tests/copp/test_copp.py
@@ -43,7 +43,8 @@
"topo",
"myip",
"peerip",
- "nn_target_interface"])
+ "nn_target_interface",
+ "nn_target_namespace"])
_SUPPORTED_PTF_TOPOS = ["ptf32", "ptf64"]
_SUPPORTED_T1_TOPOS = ["t1", "t1-lag", "t1-64-lag"]
_TOR_ONLY_PROTOCOL = ["DHCP"]
@@ -112,8 +113,7 @@ def copp_testbed(
creds,
ptfhost,
tbinfo,
- request,
- disable_lldp_for_testing # usefixtures not supported on fixtures
+ request
):
"""
Pytest fixture to handle setup and cleanup for the COPP tests.
@@ -125,10 +125,12 @@ def copp_testbed(
pytest.skip("Topology not supported by COPP tests")
try:
- _setup_testbed(duthost, creds, ptfhost, test_params)
+ _setup_multi_asic_proxy(duthost, creds, test_params, tbinfo)
+ _setup_testbed(duthost, creds, ptfhost, test_params, tbinfo)
yield test_params
finally:
- _teardown_testbed(duthost, creds, ptfhost, test_params)
+ _teardown_multi_asic_proxy(duthost, creds, test_params, tbinfo)
+ _teardown_testbed(duthost, creds, ptfhost, test_params, tbinfo)
@pytest.fixture(autouse=True)
def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
@@ -143,10 +145,6 @@ def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
loganalyzer: Loganalyzer utility fixture
"""
ignoreRegex = [
- ".*ERR monit.*'lldpd_monitor' process is not running.*",
- ".*ERR monit.* 'lldp\|lldpd_monitor' status failed.*-- 'lldpd:' is not running.*",
- ".*ERR monit.*'lldp_syncd' process is not running.*",
- ".*ERR monit.*'lldp\|lldp_syncd' status failed.*'python2 -m lldp_syncd' is not running.*",
".*snmp#snmp-subagent.*",
".*kernel reports TIME_ERROR: 0x4041: Clock Unsynchronized.*"
]
@@ -207,80 +205,106 @@ def _gather_test_params(tbinfo, duthost, request):
peerip = bgp_peer["peer_addr"]
break
- logging.info("nn_target_port {} nn_target_interface {}".format(nn_target_port, nn_target_interface))
+ nn_target_namespace = mg_facts["minigraph_neighbors"][nn_target_interface]['namespace']
+
+ logging.info("nn_target_port {} nn_target_interface {} nn_target_namespace {}".format(nn_target_port, nn_target_interface, nn_target_namespace))
return _COPPTestParameters(nn_target_port=nn_target_port,
swap_syncd=swap_syncd,
topo=topo,
myip=myip,
peerip = peerip,
- nn_target_interface=nn_target_interface)
+ nn_target_interface=nn_target_interface,
+ nn_target_namespace=nn_target_namespace)
-def _setup_testbed(dut, creds, ptf, test_params):
+def _setup_testbed(dut, creds, ptf, test_params, tbinfo):
"""
Sets up the testbed to run the COPP tests.
"""
-
logging.info("Set up the PTF for COPP tests")
copp_utils.configure_ptf(ptf, test_params.nn_target_port)
logging.info("Update the rate limit for the COPP policer")
- copp_utils.limit_policer(dut, _TEST_RATE_LIMIT)
+ copp_utils.limit_policer(dut, _TEST_RATE_LIMIT, test_params.nn_target_namespace)
- if test_params.swap_syncd:
+ # Multi-asic will not support this mode as of now.
+ if test_params.swap_syncd and not dut.is_multi_asic:
logging.info("Swap out syncd to use RPC image...")
docker.swap_syncd(dut, creds)
else:
+ # Set sysctl RCVBUF parameter for tests
+ dut.command("sysctl -w net.core.rmem_max=609430500")
+
+ # Set sysctl SENDBUF parameter for tests
+ dut.command("sysctl -w net.core.wmem_max=609430500")
+
# NOTE: Even if the rpc syncd image is already installed, we need to restart
# SWSS for the COPP changes to take effect.
logging.info("Reloading config and restarting swss...")
config_reload(dut)
logging.info("Configure syncd RPC for testing")
- copp_utils.configure_syncd(dut, test_params.nn_target_port, test_params.nn_target_interface, creds)
+ copp_utils.configure_syncd(dut, test_params.nn_target_port, test_params.nn_target_interface,
+ test_params.nn_target_namespace, creds)
-def _teardown_testbed(dut, creds, ptf, test_params):
+def _teardown_testbed(dut, creds, ptf, test_params, tbinfo):
"""
Tears down the testbed, returning it to its initial state.
"""
-
logging.info("Restore PTF post COPP test")
copp_utils.restore_ptf(ptf)
logging.info("Restore COPP policer to default settings")
- copp_utils.restore_policer(dut)
+ copp_utils.restore_policer(dut, test_params.nn_target_namespace)
- if test_params.swap_syncd:
+ if test_params.swap_syncd and not dut.is_multi_asic:
logging.info("Restore default syncd docker...")
docker.restore_default_syncd(dut, creds)
else:
+ copp_utils.restore_syncd(dut, test_params.nn_target_namespace)
logging.info("Reloading config and restarting swss...")
config_reload(dut)
-
-@pytest.fixture(scope="class")
-def disable_lldp_for_testing(
- duthosts,
- rand_one_dut_hostname,
- disable_container_autorestart,
- enable_container_autorestart
-):
- """Disables LLDP during testing so that it doesn't interfere with the policer."""
- duthost = duthosts[rand_one_dut_hostname]
-
- logging.info("Disabling LLDP for the COPP tests")
-
- feature_list = ['lldp']
- disable_container_autorestart(duthost, testcase="test_copp", feature_list=feature_list)
-
- duthost.command("docker exec lldp supervisorctl stop lldp-syncd")
- duthost.command("docker exec lldp supervisorctl stop lldpd")
-
- yield
-
- logging.info("Restoring LLDP after the COPP tests")
-
- duthost.command("docker exec lldp supervisorctl start lldpd")
- duthost.command("docker exec lldp supervisorctl start lldp-syncd")
-
- enable_container_autorestart(duthost, testcase="test_copp", feature_list=feature_list)
+def _setup_multi_asic_proxy(dut, creds, test_params, tbinfo):
+ """
+ Sets up the testbed to run the COPP tests on multi-asic platforms via setting proxy.
+ """
+ if not dut.is_multi_asic:
+ return
+
+ logging.info("Adding iptables rules and enabling eth0 port forwarding")
+ http_proxy, https_proxy = copp_utils._get_http_and_https_proxy_ip(creds)
+ # Add IP Table rule for http and ptf nn_agent traffic.
+ dut.command("sudo sysctl net.ipv4.conf.eth0.forwarding=1")
+ mgmt_ip = dut.host.options["inventory_manager"].get_host(dut.hostname).vars["ansible_host"]
+ # Add Rule to communicate to http/s proxy from namespace
+ dut.command("sudo iptables -t nat -A POSTROUTING -p tcp --dport 8080 -j SNAT --to-source {}".format(mgmt_ip))
+ dut.command("sudo ip -n {} rule add from all to {} pref 1 lookup default".format(test_params.nn_target_namespace, http_proxy))
+ if http_proxy != https_proxy:
+ dut.command("sudo ip -n {} rule add from all to {} pref 2 lookup default".format(test_params.nn_target_namespace, https_proxy))
+ # Add Rule to communicate to ptf nn agent client from namespace
+ ns_ip = dut.shell("sudo ip -n {} -4 -o addr show eth0".format(test_params.nn_target_namespace) + " | awk '{print $4}' | cut -d'/' -f1")["stdout"]
+ dut.command("sudo iptables -t nat -A PREROUTING -p tcp --dport 10900 -j DNAT --to-destination {}".format(ns_ip))
+ dut.command("sudo ip -n {} rule add from {} to {} pref 3 lookup default".format(test_params.nn_target_namespace, ns_ip, tbinfo["ptf_ip"]))
+
+def _teardown_multi_asic_proxy(dut, creds, test_params, tbinfo):
+ """
+ Tears down multi asic proxy settings, returning it to its initial state.
+ """
+ if not dut.is_multi_asic:
+ return
+
+ logging.info("Removing iptables rules and disabling eth0 port forwarding")
+ http_proxy, https_proxy = copp_utils._get_http_and_https_proxy_ip(creds)
+ dut.command("sudo sysctl net.ipv4.conf.eth0.forwarding=0")
+ # Delete IP Table rule for http and ptf nn_agent traffic.
+ mgmt_ip = dut.host.options["inventory_manager"].get_host(dut.hostname).vars["ansible_host"]
+ # Delete Rule to communicate to http/s proxy from namespace
+ dut.command("sudo iptables -t nat -D POSTROUTING -p tcp --dport 8080 -j SNAT --to-source {}".format(mgmt_ip))
+ dut.command("sudo ip -n {} rule delete from all to {} pref 1 lookup default".format(test_params.nn_target_namespace, http_proxy))
+ if http_proxy != https_proxy:
+ dut.command("sudo ip -n {} rule delete from all to {} pref 2 lookup default".format(test_params.nn_target_namespace, https_proxy))
+ # Delete Rule to communicate to ptf nn agent client from namespace
+ ns_ip = dut.shell("sudo ip -n {} -4 -o addr show eth0".format(test_params.nn_target_namespace) + " | awk '{print $4}' | cut -d'/' -f1")["stdout"]
+ dut.command("sudo iptables -t nat -D PREROUTING -p tcp --dport 10900 -j DNAT --to-destination {}".format(ns_ip))
+ dut.command("sudo ip -n {} rule delete from {} to {} pref 3 lookup default".format(test_params.nn_target_namespace, ns_ip, tbinfo["ptf_ip"]))
diff --git a/tests/dhcp_relay/test_dhcp_relay.py b/tests/dhcp_relay/test_dhcp_relay.py
index f5537b2ae56..8d167df818e 100644
--- a/tests/dhcp_relay/test_dhcp_relay.py
+++ b/tests/dhcp_relay/test_dhcp_relay.py
@@ -14,6 +14,9 @@
BROADCAST_MAC = 'ff:ff:ff:ff:ff:ff'
DEFAULT_DHCP_CLIENT_PORT = 68
+SINGLE_TOR_MODE = 'single'
+DUAL_TOR_MODE = 'dual'
+
@pytest.fixture(autouse=True)
def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
@@ -42,6 +45,8 @@ def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo):
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
host_facts = duthost.setup()['ansible_facts']
+ switch_loopback_ip = mg_facts['minigraph_lo_interfaces'][0]['addr']
+
# SONiC spawns one DHCP relay agent per VLAN interface configured on the DUT
vlan_dict = mg_facts['minigraph_vlans']
for vlan_iface_name, vlan_info_dict in vlan_dict.items():
@@ -94,10 +99,13 @@ def dut_dhcp_relay_data(duthosts, rand_one_dut_hostname, ptfhost, tbinfo):
dhcp_relay_data['client_iface'] = client_iface
dhcp_relay_data['uplink_interfaces'] = uplink_interfaces
dhcp_relay_data['uplink_port_indices'] = uplink_port_indices
+ dhcp_relay_data['switch_loopback_ip'] = str(switch_loopback_ip)
+
dhcp_relay_data_list.append(dhcp_relay_data)
return dhcp_relay_data_list
+
@pytest.fixture(scope="module")
def validate_dut_routes_exist(duthosts, rand_one_dut_hostname, dut_dhcp_relay_data):
"""Fixture to valid a route to each DHCP server exist
@@ -112,12 +120,61 @@ def validate_dut_routes_exist(duthosts, rand_one_dut_hostname, dut_dhcp_relay_da
assert len(rtInfo["nexthops"]) > 0, "Failed to find route to DHCP server '{0}'".format(dhcp_server)
-def test_dhcp_relay_default(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist):
+def restart_dhcp_service(duthost):
+ duthost.shell('systemctl reset-failed dhcp_relay')
+ duthost.shell('systemctl restart dhcp_relay')
+ duthost.shell('systemctl reset-failed dhcp_relay')
+
+ for retry in range(5):
+ time.sleep(30)
+ dhcp_status = duthost.shell('docker container top dhcp_relay | grep dhcrelay | cat')["stdout"]
+ if dhcp_status != "":
+ break
+ else:
+ assert False, "Failed to restart dhcp docker"
+
+ time.sleep(30)
+
+
+def get_subtype_from_configdb(duthost):
+ # HEXISTS returns 1 if the key exists, otherwise 0
+ subtype_exist = int(duthost.shell('redis-cli -n 4 HEXISTS "DEVICE_METADATA|localhost" "subtype"')["stdout"])
+ subtype_value = ""
+ if subtype_exist:
+ subtype_value = duthost.shell('redis-cli -n 4 HGET "DEVICE_METADATA|localhost" "subtype"')["stdout"]
+ return subtype_exist, subtype_value
+
+
+@pytest.fixture(scope="module", params=[SINGLE_TOR_MODE, DUAL_TOR_MODE])
+def testing_config(request, duthosts, rand_one_dut_hostname):
+ testing_mode = request.param
+ duthost = duthosts[rand_one_dut_hostname]
+ subtype_exist, subtype_value = get_subtype_from_configdb(duthost)
+
+ if testing_mode == SINGLE_TOR_MODE:
+ if subtype_exist:
+ duthost.shell('redis-cli -n 4 HDEL "DEVICE_METADATA|localhost" "subtype"')
+ restart_dhcp_service(duthost)
+
+ if testing_mode == DUAL_TOR_MODE:
+ if not subtype_exist or subtype_value != 'DualToR':
+ duthost.shell('redis-cli -n 4 HSET "DEVICE_METADATA|localhost" "subtype" "DualToR"')
+ restart_dhcp_service(duthost)
+
+ yield testing_mode, duthost
+
+ if testing_mode == DUAL_TOR_MODE:
+ duthost.shell('redis-cli -n 4 HDEL "DEVICE_METADATA|localhost" "subtype"')
+ restart_dhcp_service(duthost)
+
+
+def test_dhcp_relay_default(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config):
"""Test DHCP relay functionality on T0 topology.
For each DHCP relay agent running on the DuT, verify DHCP packets are relayed properly
"""
- duthost = duthosts[rand_one_dut_hostname]
+ testing_mode, duthost = testing_config
+
for dhcp_relay in dut_dhcp_relay_data:
# Run the DHCP relay test on the PTF host
ptf_runner(ptfhost,
@@ -134,17 +191,20 @@ def test_dhcp_relay_default(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_r
"relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
"relay_iface_netmask": str(dhcp_relay['downlink_vlan_iface']['mask']),
"dest_mac_address": BROADCAST_MAC,
- "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT},
+ "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT,
+ "switch_loopback_ip": dhcp_relay['switch_loopback_ip'],
+ "testing_mode": testing_mode},
log_file="/tmp/dhcp_relay_test.DHCPTest.log")
-def test_dhcp_relay_after_link_flap(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist):
+def test_dhcp_relay_after_link_flap(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config):
"""Test DHCP relay functionality on T0 topology after uplinks flap
For each DHCP relay agent running on the DuT, with relay agent running, flap the uplinks,
then test whether the DHCP relay agent relays packets properly.
"""
- duthost = duthosts[rand_one_dut_hostname]
+ testing_mode, duthost = testing_config
+
for dhcp_relay in dut_dhcp_relay_data:
# Bring all uplink interfaces down
for iface in dhcp_relay['uplink_interfaces']:
@@ -175,18 +235,21 @@ def test_dhcp_relay_after_link_flap(duthosts, rand_one_dut_hostname, ptfhost, du
"relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
"relay_iface_netmask": str(dhcp_relay['downlink_vlan_iface']['mask']),
"dest_mac_address": BROADCAST_MAC,
- "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT},
+ "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT,
+ "switch_loopback_ip": dhcp_relay['switch_loopback_ip'],
+ "testing_mode": testing_mode},
log_file="/tmp/dhcp_relay_test.DHCPTest.log")
-def test_dhcp_relay_start_with_uplinks_down(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist):
+def test_dhcp_relay_start_with_uplinks_down(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config):
"""Test DHCP relay functionality on T0 topology when relay agent starts with uplinks down
For each DHCP relay agent running on the DuT, bring the uplinks down, then restart the
relay agent while the uplinks are still down. Then test whether the DHCP relay agent
relays packets properly.
"""
- duthost = duthosts[rand_one_dut_hostname]
+ testing_mode, duthost = testing_config
+
for dhcp_relay in dut_dhcp_relay_data:
# Bring all uplink interfaces down
for iface in dhcp_relay['uplink_interfaces']:
@@ -224,16 +287,19 @@ def test_dhcp_relay_start_with_uplinks_down(duthosts, rand_one_dut_hostname, ptf
"relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
"relay_iface_netmask": str(dhcp_relay['downlink_vlan_iface']['mask']),
"dest_mac_address": BROADCAST_MAC,
- "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT},
+ "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT,
+ "switch_loopback_ip": dhcp_relay['switch_loopback_ip'],
+ "testing_mode": testing_mode},
log_file="/tmp/dhcp_relay_test.DHCPTest.log")
-def test_dhcp_relay_unicast_mac(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist):
+def test_dhcp_relay_unicast_mac(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config):
"""Test DHCP relay functionality on T0 topology with unicast mac
Instead of using broadcast MAC, use unicast MAC of DUT and verify that DHCP relay functionality is entact.
"""
- duthost = duthosts[rand_one_dut_hostname]
+ testing_mode, duthost = testing_config
+
for dhcp_relay in dut_dhcp_relay_data:
# Run the DHCP relay test on the PTF host
ptf_runner(ptfhost,
@@ -250,17 +316,20 @@ def test_dhcp_relay_unicast_mac(duthosts, rand_one_dut_hostname, ptfhost, dut_dh
"relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
"relay_iface_netmask": str(dhcp_relay['downlink_vlan_iface']['mask']),
"dest_mac_address": duthost.facts["router_mac"],
- "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT},
+ "client_udp_src_port": DEFAULT_DHCP_CLIENT_PORT,
+ "switch_loopback_ip": dhcp_relay['switch_loopback_ip'],
+ "testing_mode": testing_mode},
log_file="/tmp/dhcp_relay_test.DHCPTest.log")
-def test_dhcp_relay_random_sport(duthosts, rand_one_dut_hostname, ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist):
+def test_dhcp_relay_random_sport(ptfhost, dut_dhcp_relay_data, validate_dut_routes_exist, testing_config):
"""Test DHCP relay functionality on T0 topology with random source port (sport)
If the client is SNAT'd, the source port could be changed to a non-standard port (i.e., not 68).
Verify that DHCP relay works with random high sport.
"""
- duthost = duthosts[rand_one_dut_hostname]
+ testing_mode, duthost = testing_config
+
RANDOM_CLIENT_PORT = random.choice(range(1000, 65535))
for dhcp_relay in dut_dhcp_relay_data:
# Run the DHCP relay test on the PTF host
@@ -278,5 +347,7 @@ def test_dhcp_relay_random_sport(duthosts, rand_one_dut_hostname, ptfhost, dut_d
"relay_iface_mac": str(dhcp_relay['downlink_vlan_iface']['mac']),
"relay_iface_netmask": str(dhcp_relay['downlink_vlan_iface']['mask']),
"dest_mac_address": BROADCAST_MAC,
- "client_udp_src_port": RANDOM_CLIENT_PORT},
+ "client_udp_src_port": RANDOM_CLIENT_PORT,
+ "switch_loopback_ip": dhcp_relay['switch_loopback_ip'],
+ "testing_mode": testing_mode},
log_file="/tmp/dhcp_relay_test.DHCPTest.log")
diff --git a/tests/drop_packets/test_drop_counters.py b/tests/drop_packets/test_drop_counters.py
index f45e2f25e0c..1a9169ec976 100755
--- a/tests/drop_packets/test_drop_counters.py
+++ b/tests/drop_packets/test_drop_counters.py
@@ -1,16 +1,16 @@
import logging
import os
-import re
import time
import pytest
import yaml
-import json
+import re
import ptf.packet as packet
import ptf.testutils as testutils
from tests.common.helpers.assertions import pytest_assert
from tests.common.utilities import wait_until
+from tests.common.helpers.drop_counters.drop_counters import verify_drop_counters, ensure_no_l3_drops, ensure_no_l2_drops
from drop_packets import * # FIXME
pytestmark = [
@@ -69,6 +69,27 @@ def enable_counters(duthosts, rand_one_dut_hostname):
duthost.command(CMD_PREFIX + "counterpoll {} disable".format(port))
+@pytest.fixture(scope='module', autouse=True)
+def parse_combined_counters(duthosts, rand_one_dut_hostname):
+ duthost = duthosts[rand_one_dut_hostname]
+ # Get info whether L2 and L3 drop counters are linked
+ # Or ACL and L2 drop counters are linked
+ global COMBINED_L2L3_DROP_COUNTER, COMBINED_ACL_DROP_COUNTER
+ base_dir = os.path.dirname(os.path.realpath(__file__))
+ with open(os.path.join(base_dir, "combined_drop_counters.yml")) as stream:
+ regexps = yaml.safe_load(stream)
+ if regexps["l2_l3"]:
+ for item in regexps["l2_l3"]:
+ if re.match(item, duthost.facts["platform"]):
+ COMBINED_L2L3_DROP_COUNTER = True
+ break
+ if regexps["acl_l2"]:
+ for item in regexps["acl_l2"]:
+ if re.match(item, duthost.facts["platform"]):
+ COMBINED_ACL_DROP_COUNTER = True
+ break
+
+
@pytest.fixture
def acl_setup(duthosts, rand_one_dut_hostname, loganalyzer):
""" Create acl rule defined in config file. Delete rule after test case finished """
@@ -105,105 +126,6 @@ def acl_setup(duthosts, rand_one_dut_hostname, loganalyzer):
time.sleep(ACL_COUNTERS_UPDATE_INTERVAL)
-@pytest.fixture(scope='module', autouse=True)
-def parse_combined_counters(duthosts, rand_one_dut_hostname):
- duthost = duthosts[rand_one_dut_hostname]
- # Get info whether L2 and L3 drop counters are linked
- # Or ACL and L2 drop counters are linked
- global COMBINED_L2L3_DROP_COUNTER, COMBINED_ACL_DROP_COUNTER
- base_dir = os.path.dirname(os.path.realpath(__file__))
- with open(os.path.join(base_dir, "combined_drop_counters.yml")) as stream:
- regexps = yaml.safe_load(stream)
- if regexps["l2_l3"]:
- for item in regexps["l2_l3"]:
- if re.match(item, duthost.facts["platform"]):
- COMBINED_L2L3_DROP_COUNTER = True
- break
- if regexps["acl_l2"]:
- for item in regexps["acl_l2"]:
- if re.match(item, duthost.facts["platform"]):
- COMBINED_ACL_DROP_COUNTER = True
- break
-
-def get_pkt_drops(duthost, cli_cmd, asic_index):
- """
- @summary: Parse output of "portstat" or "intfstat" commands and convert it to the dictionary.
- @param module: The AnsibleModule object
- @param cli_cmd: one of supported CLI commands - "portstat -j" or "intfstat -j"
- @return: Return dictionary of parsed counters
- """
- # Get namespace from asic_index.
- namespace = duthost.get_namespace_from_asic_id(asic_index)
-
- # Frame the correct cli command
- # the L2 commands need _SUFFIX and L3 commands need _PREFIX
- if cli_cmd == GET_L3_COUNTERS:
- CMD_PREFIX = NAMESPACE_PREFIX if duthost.is_multi_asic else ''
- cli_cmd = CMD_PREFIX + cli_cmd
- elif cli_cmd == GET_L2_COUNTERS:
- CMD_SUFFIX = NAMESPACE_SUFFIX if duthost.is_multi_asic else ''
- cli_cmd = cli_cmd + CMD_SUFFIX
-
- stdout = duthost.command(cli_cmd.format(namespace))
- stdout = stdout["stdout"]
- match = re.search("Last cached time was.*\n", stdout)
- if match:
- stdout = re.sub("Last cached time was.*\n", "", stdout)
-
- try:
- return json.loads(stdout)
- except Exception as err:
- raise Exception("Failed to parse output of '{}', err={}".format(cli_cmd, str(err)))
-
-
-def ensure_no_l3_drops(duthost, asic_index):
- """ Verify L3 drop counters were not incremented """
- intf_l3_counters = get_pkt_drops(duthost, GET_L3_COUNTERS, asic_index)
- unexpected_drops = {}
- for iface, value in intf_l3_counters.items():
- try:
- rx_err_value = int(value[RX_ERR])
- except ValueError as err:
- logger.info("Unable to verify L3 drops on iface {}, L3 counters may not be supported on this platform\n{}".format(iface, err))
- continue
- if rx_err_value >= PKT_NUMBER:
- unexpected_drops[iface] = rx_err_value
- if unexpected_drops:
- pytest.fail("L3 'RX_ERR' was incremented for the following interfaces:\n{}".format(unexpected_drops))
-
-
-def ensure_no_l2_drops(duthost, asic_index):
- """ Verify L2 drop counters were not incremented """
- intf_l2_counters = get_pkt_drops(duthost, GET_L2_COUNTERS, asic_index)
- unexpected_drops = {}
- for iface, value in intf_l2_counters.items():
- try:
- rx_drp_value = int(value[RX_DRP])
- except ValueError as err:
- logger.warning("Unable to verify L2 drops on iface {}\n{}".format(iface, err))
- continue
- if rx_drp_value >= PKT_NUMBER:
- unexpected_drops[iface] = rx_drp_value
- if unexpected_drops:
- pytest.fail("L2 'RX_DRP' was incremented for the following interfaces:\n{}".format(unexpected_drops))
-
-
-def str_to_int(value):
- """ Convert string value which can contain ',' symbols to integer value """
- return int(value.replace(",", ""))
-
-
-def verify_drop_counters(duthost, asic_index, dut_iface, get_cnt_cli_cmd, column_key):
- """ Verify drop counter incremented on specific interface """
- get_drops = lambda: int(get_pkt_drops(duthost, get_cnt_cli_cmd, asic_index)[dut_iface][column_key].replace(",", ""))
- check_drops_on_dut = lambda: PKT_NUMBER == get_drops()
- if not wait_until(5, 1, check_drops_on_dut):
- fail_msg = "'{}' drop counter was not incremented on iface {}. DUT {} == {}; Sent == {}".format(
- column_key, dut_iface, column_key, get_drops(), PKT_NUMBER
- )
- pytest.fail(fail_msg)
-
-
def base_verification(discard_group, pkt, ptfadapter, duthost, asic_index, ports_info, tx_dut_ports=None):
"""
Base test function for verification of L2 or L3 packet drops. Verification type depends on 'discard_group' value.
@@ -219,18 +141,18 @@ def base_verification(discard_group, pkt, ptfadapter, duthost, asic_index, ports
send_packets(pkt, duthost, ptfadapter, ports_info["ptf_tx_port_id"], PKT_NUMBER)
if discard_group == "L2":
- verify_drop_counters(duthost, asic_index, ports_info["dut_iface"], GET_L2_COUNTERS, L2_COL_KEY)
- ensure_no_l3_drops(duthost, asic_index)
+ verify_drop_counters(duthost, asic_index, ports_info["dut_iface"], GET_L2_COUNTERS, L2_COL_KEY, packets_count=PKT_NUMBER)
+ ensure_no_l3_drops(duthost, asic_index, packets_count=PKT_NUMBER)
elif discard_group == "L3":
if COMBINED_L2L3_DROP_COUNTER:
- verify_drop_counters(duthost, asic_index, ports_info["dut_iface"], GET_L2_COUNTERS, L2_COL_KEY)
- ensure_no_l3_drops(duthost, asic_index)
+ verify_drop_counters(duthost, asic_index, ports_info["dut_iface"], GET_L2_COUNTERS, L2_COL_KEY, packets_count=PKT_NUMBER)
+ ensure_no_l3_drops(duthost, asic_index, packets_count=PKT_NUMBER)
else:
if not tx_dut_ports:
pytest.fail("No L3 interface specified")
- verify_drop_counters(duthost, asic_index, tx_dut_ports[ports_info["dut_iface"]], GET_L3_COUNTERS, L3_COL_KEY)
- ensure_no_l2_drops(duthost, asic_index)
+ verify_drop_counters(duthost, asic_index, tx_dut_ports[ports_info["dut_iface"]], GET_L3_COUNTERS, L3_COL_KEY, packets_count=PKT_NUMBER)
+ ensure_no_l2_drops(duthost, asic_index, packets_count=PKT_NUMBER)
elif discard_group == "ACL":
if not tx_dut_ports:
pytest.fail("No L3 interface specified")
@@ -243,11 +165,11 @@ def base_verification(discard_group, pkt, ptfadapter, duthost, asic_index, ports
)
pytest.fail(fail_msg)
if not COMBINED_ACL_DROP_COUNTER:
- ensure_no_l3_drops(duthost, asic_index)
- ensure_no_l2_drops(duthost, asic_index)
+ ensure_no_l3_drops(duthost, asic_index, packets_count=PKT_NUMBER)
+ ensure_no_l2_drops(duthost, asic_index, packets_count=PKT_NUMBER)
elif discard_group == "NO_DROPS":
- ensure_no_l2_drops(duthost, asic_index)
- ensure_no_l3_drops(duthost, asic_index)
+ ensure_no_l2_drops(duthost, asic_index, packets_count=PKT_NUMBER)
+ ensure_no_l3_drops(duthost, asic_index, packets_count=PKT_NUMBER)
else:
pytest.fail("Incorrect 'discard_group' specified. Supported values: 'L2', 'L3', 'ACL' or 'NO_DROPS'")
diff --git a/tests/dualtor/conftest.py b/tests/dualtor/conftest.py
index 98223463129..4e26a9ec4ff 100644
--- a/tests/dualtor/conftest.py
+++ b/tests/dualtor/conftest.py
@@ -1,2 +1,34 @@
-from tests.common.dualtor.mux_simulator_control import mux_server_url, toggle_all_simulator_ports_to_rand_selected_tor # lgtm[py/unused-import]
+import pytest
+import logging
+import time
+from tests.common.dualtor.dual_tor_utils import get_crm_nexthop_counter, lower_tor_host # lgtm[py/unused-import]
+from tests.common.helpers.assertions import pytest_assert as py_assert
+
+
+CRM_POLL_INTERVAL = 1
+CRM_DEFAULT_POLL_INTERVAL = 300
+
+
+@pytest.fixture
+def set_crm_polling_interval(lower_tor_host):
+ """
+ A function level fixture to set crm polling interval to 1 second
+ """
+ wait_time = 2
+ lower_tor_host.command("crm config polling interval {}".format(CRM_POLL_INTERVAL))
+ logging.info("Waiting {} sec for CRM counters to become updated".format(wait_time))
+ time.sleep(wait_time)
+ yield
+ lower_tor_host.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))
+
+
+@pytest.fixture
+def verify_crm_nexthop_counter_not_increased(lower_tor_host):
+ """
+ A function level fixture to verify crm nexthop counter not increased
+ """
+ original_counter = get_crm_nexthop_counter(lower_tor_host)
+ yield
+ diff = get_crm_nexthop_counter(lower_tor_host) - original_counter
+ py_assert(diff == 0, "crm nexthop counter is increased by {}.".format(diff))
diff --git a/tests/dualtor/test_ipinip.py b/tests/dualtor/test_ipinip.py
new file mode 100644
index 00000000000..4435b64c827
--- /dev/null
+++ b/tests/dualtor/test_ipinip.py
@@ -0,0 +1,132 @@
+"""
+1. Send IPinIP packets from t1 to ToR.
+2. Check that for inner packet that has destination IP as active server IP, the packet
+is decapsulated and forwarded to server port.
+3. Check that for inner packet that has destination IP as standby server IP, the packet
+is not forwarded to server port or re-encapsulated to T1s.
+"""
+import logging
+import pytest
+import random
+
+from ptf import mask
+from ptf import testutils
+from scapy.all import Ether, IP
+from tests.common.dualtor.dual_tor_mock import *
+from tests.common.dualtor.dual_tor_utils import get_t1_ptf_ports
+from tests.common.dualtor.dual_tor_utils import rand_selected_interface
+from tests.common.dualtor.tunnel_traffic_utils import tunnel_traffic_monitor
+from tests.common.utilities import is_ipv4_address
+
+
+pytestmark = [
+ pytest.mark.topology("t0")
+]
+
+
+@pytest.fixture(scope="function")
+def build_encapsulated_packet(rand_selected_interface, ptfadapter, rand_selected_dut, tunnel_traffic_monitor):
+ """Build the encapsulated packet sent from T1 to ToR."""
+ tor = rand_selected_dut
+ _, server_ips = rand_selected_interface
+ server_ipv4 = server_ips["server_ipv4"].split("/")[0]
+ config_facts = tor.get_running_config_facts()
+ try:
+ peer_ipv4_address = [_["address_ipv4"] for _ in config_facts["PEER_SWITCH"].values()][0]
+ except IndexError:
+ raise ValueError("Failed to get peer ToR address from CONFIG_DB")
+
+ tor_ipv4_address = [_ for _ in config_facts["LOOPBACK_INTERFACE"]["Loopback0"]
+ if is_ipv4_address(_.split("/")[0])][0]
+ tor_ipv4_address = tor_ipv4_address.split("/")[0]
+
+ inner_dscp = random.choice(range(0, 33))
+ inner_ttl = random.choice(range(3, 65))
+ inner_packet = testutils.simple_ip_packet(
+ ip_src="1.1.1.1",
+ ip_dst=server_ipv4,
+ ip_dscp=inner_dscp,
+ ip_ttl=inner_ttl
+ )[IP]
+ packet = testutils.simple_ipv4ip_packet(
+ eth_dst=tor.facts["router_mac"],
+ eth_src=ptfadapter.dataplane.get_mac(0, 0),
+ ip_src=peer_ipv4_address,
+ ip_dst=tor_ipv4_address,
+ ip_dscp=inner_dscp,
+ ip_ttl=255,
+ inner_frame=inner_packet
+ )
+ logging.info("the encapsulated packet to send:\n%s", tunnel_traffic_monitor._dump_show_str(packet))
+ return packet
+
+
+def get_ptf_server_intf_index(tor, tbinfo, iface):
+ """Get the index of ptf ToR-facing interface on ptf."""
+ mg_facts = tor.get_extended_minigraph_facts(tbinfo)
+ return mg_facts["minigraph_ptf_indices"][iface]
+
+
+def build_expected_packet_to_server(encapsulated_packet):
+ """Build packet expected to be received by server from the tunnel packet."""
+ inner_packet = encapsulated_packet[IP].payload[IP].copy()
+ # use dummy mac address that will be ignored in mask
+ inner_packet = Ether(src="aa:bb:cc:dd:ee:ff", dst="aa:bb:cc:dd:ee:ff") / inner_packet
+ exp_pkt = mask.Mask(inner_packet)
+ exp_pkt.set_do_not_care_scapy(Ether, "dst")
+ exp_pkt.set_do_not_care_scapy(Ether, "src")
+ exp_pkt.set_do_not_care_scapy(IP, "tos")
+ exp_pkt.set_do_not_care_scapy(IP, "ttl")
+ exp_pkt.set_do_not_care_scapy(IP, "chksum")
+ return exp_pkt
+
+
+def test_decap_active_tor(
+ apply_mock_dual_tor_tables,
+ apply_mock_dual_tor_kernel_configs,
+ apply_active_state_to_orchagent,
+ build_encapsulated_packet, rand_selected_interface, ptfadapter,
+ tbinfo, rand_selected_dut, tunnel_traffic_monitor
+):
+ tor = rand_selected_dut
+ encapsulated_packet = build_encapsulated_packet
+ iface, _ = rand_selected_interface
+
+ exp_ptf_port_index = get_ptf_server_intf_index(tor, tbinfo, iface)
+ exp_pkt = build_expected_packet_to_server(encapsulated_packet)
+
+ ptfadapter.dataplane.flush()
+ ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
+ logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf)
+ testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet, count=1)
+ _, rec_pkt = testutils.verify_packet_any_port(ptfadapter, exp_pkt, ports=[exp_ptf_port_index])
+ rec_pkt = Ether(rec_pkt)
+ logging.info("received decap packet:\n%s", tunnel_traffic_monitor._dump_show_str(rec_pkt))
+ exp_ttl = encapsulated_packet[IP].payload[IP].ttl - 1
+ exp_tos = encapsulated_packet[IP].payload[IP].tos
+ if rec_pkt[IP].ttl != exp_ttl:
+ pytest.fail("the expected ttl should be %s" % exp_ttl)
+ if rec_pkt[IP].tos != exp_tos:
+ pytest.fail("the expected tos should be %s" % exp_tos)
+
+
+def test_decap_standby_tor(
+ apply_mock_dual_tor_tables,
+ apply_mock_dual_tor_kernel_configs,
+ apply_standby_state_to_orchagent,
+ build_encapsulated_packet, rand_selected_interface, ptfadapter,
+ tbinfo, rand_selected_dut, tunnel_traffic_monitor
+):
+ tor = rand_selected_dut
+ encapsulated_packet = build_encapsulated_packet
+ iface, _ = rand_selected_interface
+
+ exp_ptf_port_index = get_ptf_server_intf_index(tor, tbinfo, iface)
+ exp_pkt = build_expected_packet_to_server(encapsulated_packet)
+
+ ptf_t1_intf = random.choice(get_t1_ptf_ports(tor, tbinfo))
+ logging.info("send encapsulated packet from ptf t1 interface %s", ptf_t1_intf)
+ with tunnel_traffic_monitor(tor, existing=False):
+ testutils.send(ptfadapter, int(ptf_t1_intf.strip("eth")), encapsulated_packet, count=1)
+
+ testutils.verify_no_packet_any(ptfadapter, exp_pkt, ports=[exp_ptf_port_index])
diff --git a/tests/dualtor/test_normal_op.py b/tests/dualtor/test_normal_op.py
new file mode 100644
index 00000000000..7fe6127bb7c
--- /dev/null
+++ b/tests/dualtor/test_normal_op.py
@@ -0,0 +1,148 @@
+import pytest
+
+from tests.common.config_reload import config_reload
+from tests.common.dualtor.control_plane_utils import verify_tor_states
+from tests.common.dualtor.data_plane_utils import send_t1_to_server_with_action, send_server_to_t1_with_action # lgtm[py/unused-import]
+from tests.common.dualtor.dual_tor_utils import upper_tor_host, lower_tor_host, force_active_tor # lgtm[py/unused-import]
+from tests.common.dualtor.mux_simulator_control import toggle_all_simulator_ports_to_upper_tor, toggle_all_simulator_ports_to_lower_tor # lgtm[py/unused-import]
+from tests.common.fixtures.ptfhost_utils import run_icmp_responder, run_garp_service, copy_ptftests_directory, change_mac_addresses # lgtm[py/unused-import]
+
+pytestmark = [
+ pytest.mark.topology("dualtor")
+]
+
+
+def test_normal_op_upstream(upper_tor_host, lower_tor_host,
+ send_server_to_t1_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """Send upstream traffic and confirm no disruption or switchover occurs"""
+ send_server_to_t1_with_action(upper_tor_host, verify=True)
+ verify_tor_states(expected_active_host=upper_tor_host,
+ expected_standby_host=lower_tor_host)
+
+
+def test_normal_op_downstream_active(upper_tor_host, lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send downstream traffic to the active ToR and confirm no disruption or
+ switchover occurs
+ """
+ send_t1_to_server_with_action(upper_tor_host, verify=True)
+ verify_tor_states(expected_active_host=upper_tor_host,
+ expected_standby_host=lower_tor_host)
+
+
+def test_normal_op_downstream_standby(upper_tor_host, lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send downstream traffic to the standby ToR and confirm no disruption or
+ switchover occurs
+ """
+ send_t1_to_server_with_action(lower_tor_host, verify=True)
+ verify_tor_states(expected_active_host=upper_tor_host,
+ expected_standby_host=lower_tor_host)
+
+
+def test_active_config_reload_upstream(upper_tor_host, lower_tor_host,
+ send_server_to_t1_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send upstream traffic and `config reload` the active ToR. Confirm
+ switchover occurs and disruption lasted < 1 second
+ """
+ send_server_to_t1_with_action(upper_tor_host, verify=True, delay=1,
+ action=lambda: config_reload(upper_tor_host,
+ wait=0))
+ verify_tor_states(expected_active_host=lower_tor_host,
+ expected_standby_host=upper_tor_host)
+
+
+def test_standby_config_reload_upstream(upper_tor_host, lower_tor_host,
+ send_server_to_t1_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send upstream traffic and `config reload` the standby ToR. Confirm no
+ switchover occurs and no disruption
+ """
+ send_server_to_t1_with_action(upper_tor_host, verify=True, delay=1,
+ action=lambda: config_reload(lower_tor_host,
+ wait=0))
+ verify_tor_states(expected_active_host=upper_tor_host,
+ expected_standby_host=lower_tor_host)
+
+
+def test_standby_config_reload_downstream_active(upper_tor_host,
+ lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send downstream traffic to the active ToR and `config reload` the
+ standby ToR. Confirm no switchover occurs and no disruption
+ """
+ send_t1_to_server_with_action(upper_tor_host, verify=True,
+ action=lambda: config_reload(lower_tor_host,
+ wait=0))
+ verify_tor_states(expected_active_host=upper_tor_host,
+ expected_standby_host=lower_tor_host)
+
+
+def test_active_config_reload_downstream_standby(upper_tor_host,
+ lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor):
+ """
+ Send downstream traffic to the standby ToR and `config reload` the
+ active ToR. Confirm switchover occurs and disruption lasts < 1 second
+ """
+ send_t1_to_server_with_action(lower_tor_host, verify=True, delay=1,
+ action=lambda: config_reload(upper_tor_host,
+ wait=0))
+ verify_tor_states(expected_active_host=lower_tor_host,
+ expected_standby_host=upper_tor_host)
+
+
+def test_tor_switch_upstream(upper_tor_host, lower_tor_host,
+ send_server_to_t1_with_action,
+ toggle_all_simulator_ports_to_upper_tor,
+ force_active_tor):
+ """
+ Send upstream traffic and perform switchover via CLI. Confirm switchover
+ occurs and disruption lasts < 1 second
+ """
+ send_server_to_t1_with_action(upper_tor_host, verify=True, delay=1,
+ action=lambda: force_active_tor(
+ lower_tor_host, 'all'))
+ verify_tor_states(expected_active_host=lower_tor_host,
+ expected_standby_host=upper_tor_host)
+
+
+def test_tor_switch_downstream_active(upper_tor_host, lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor,
+ force_active_tor):
+ """
+ Send downstream traffic to the active ToR and perform switchover via
+ CLI. Confirm switchover occurs and disruption lasts < 1 second
+ """
+ send_t1_to_server_with_action(upper_tor_host, verify=True, delay=1,
+ action=lambda: force_active_tor(
+ lower_tor_host, 'all'))
+ verify_tor_states(expected_active_host=lower_tor_host,
+ expected_standby_host=upper_tor_host)
+
+
+def test_tor_switch_downstream_standby(upper_tor_host, lower_tor_host,
+ send_t1_to_server_with_action,
+ toggle_all_simulator_ports_to_upper_tor,
+ force_active_tor):
+ """
+ Send downstream traffic to the standby ToR and perform switchover via CLI.
+ Confirm switchover occurs and disruption lasts < 1 second
+ """
+ send_t1_to_server_with_action(lower_tor_host, verify=True, delay=1,
+ action=lambda: force_active_tor(
+ lower_tor_host, 'all'))
+ verify_tor_states(expected_active_host=lower_tor_host,
+ expected_standby_host=upper_tor_host)
diff --git a/tests/iface_namingmode/test_iface_namingmode.py b/tests/iface_namingmode/test_iface_namingmode.py
index 53b37e5dd1d..5f8fd323ca3 100644
--- a/tests/iface_namingmode/test_iface_namingmode.py
+++ b/tests/iface_namingmode/test_iface_namingmode.py
@@ -2,7 +2,7 @@
import pytest
import re
-from tests.common.devices import AnsibleHostBase
+from tests.common.devices.base import AnsibleHostBase
from tests.common.utilities import wait
from netaddr import IPAddress
diff --git a/tests/ipfwd/conftest.py b/tests/ipfwd/conftest.py
new file mode 100644
index 00000000000..6c60afa82dc
--- /dev/null
+++ b/tests/ipfwd/conftest.py
@@ -0,0 +1,114 @@
+import pytest
+from ipaddress import ip_address
+import logging
+import json
+
+
+logger = logging.getLogger(__name__)
+
+
+'''
+In case of multi-dut we need src_host_ip, src_router_ip, dst_host_ip, src_ptf_port_list, dst_ptf_port_list for the dut under test,
+to take care of that made changes in the testcase
+'''
+
+def get_lag_facts(dut, lag_facts, switch_arptable, mg_facts, ignore_lags, key='src'):
+ if not mg_facts['minigraph_portchannels']:
+ pytest.fail("minigraph_portchannels is not defined")
+
+ # minigraph facts
+ selected_lag_facts = {}
+ up_lag = None
+ for a_lag_name, a_lag_data in lag_facts['lags'].items():
+ if a_lag_data['po_intf_stat'] == 'Up' and a_lag_name not in ignore_lags:
+ # We found a portchannel that is up.
+ up_lag = a_lag_name
+ selected_lag_facts[key + '_port_ids'] = [mg_facts['minigraph_ptf_indices'][intf] for intf in a_lag_data['po_config']['ports']]
+ selected_lag_facts[key + '_router_mac'] = dut.facts['router_mac']
+ for intf in mg_facts['minigraph_portchannel_interfaces']:
+ if intf['attachto'] == up_lag:
+ addr = ip_address(unicode(intf['addr']))
+ if addr.version == 4:
+ selected_lag_facts[key + '_router_ipv4'] = intf['addr']
+ selected_lag_facts[key + '_host_ipv4'] = intf['peer_addr']
+ selected_lag_facts[key + '_host_mac'] = switch_arptable['arptable']['v4'][intf['peer_addr']]['macaddress']
+ elif addr.version == 6:
+ selected_lag_facts[key + '_router_ipv6'] = intf['addr']
+ selected_lag_facts[key + '_host_ipv6'] = intf['peer_addr']
+ logger.info("{} lag is {}".format(key, up_lag))
+ break
+
+ return up_lag, selected_lag_facts
+
+
+def get_port_facts(dut, mg_facts, port_status, switch_arptable, ignore_intfs, key='src'):
+ if not mg_facts['minigraph_interfaces']:
+ pytest.fail("minigraph_interfaces is not defined.")
+ selected_port_facts = {}
+ up_port = None
+ for a_intf_name, a_intf_data in port_status['int_status'].items():
+ if a_intf_data['oper_state'] == 'up' and a_intf_name not in ignore_intfs:
+ # Got a port that is up and not already used.
+ for intf in mg_facts['minigraph_interfaces']:
+ if intf['attachto'] == a_intf_name:
+ up_port = a_intf_name
+ selected_port_facts[key + '_port_ids'] = [mg_facts['minigraph_ptf_indices'][a_intf_name]]
+ selected_port_facts[key + '_router_mac'] = dut.facts['router_mac']
+ addr = ip_address(unicode(intf['addr']))
+ if addr.version == 4:
+ selected_port_facts[key + '_router_ipv4'] = intf['addr']
+ selected_port_facts[key + '_host_ipv4'] = intf['peer_addr']
+ selected_port_facts[key + '_host_mac'] = switch_arptable['arptable']['v4'][intf['peer_addr']]['macaddress']
+ elif addr.version == 6:
+ selected_port_facts[key + '_router_ipv6'] = intf['addr']
+ selected_port_facts[key + '_host_ipv6'] = intf['peer_addr']
+ if up_port:
+ logger.info("{} port is {}".format(key, up_port))
+ break
+ return up_port, selected_port_facts
+
+@pytest.fixture(scope='function')
+def gather_facts(tbinfo, duthosts, enum_rand_one_per_hwsku_frontend_hostname):
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ facts = {}
+
+ logger.info("Gathering facts on DUT ...")
+ mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
+
+    # Use the arp table to get the mac address of the host (VM's) instead of lldp_facts as that is what is used
+ # by the DUT to forward traffic - regardless of lag or port.
+ switch_arptable = duthost.switch_arptable()['ansible_facts']
+ used_intfs = set()
+    src = None  # Name of lag or interface that is up
+    dst = None  # Name of lag or interface that is up
+
+ # if minigraph_portchannel_interfaces is not empty - topology with lag - check if we have 2 lags that are 'Up'
+ if mg_facts['minigraph_portchannel_interfaces']:
+        # Get lag facts from the DUT to check which lag is up
+ lag_facts = duthost.lag_facts(host=duthost.hostname)['ansible_facts']['lag_facts']
+ src, src_lag_facts = get_lag_facts(duthost, lag_facts, switch_arptable, mg_facts, used_intfs, key='src')
+ used_intfs.add(src)
+ if src:
+ facts.update(src_lag_facts)
+ # We found a src lag, let see if we can find a dst lag
+ dst, dst_lag_facts = get_lag_facts(duthost, lag_facts, switch_arptable, mg_facts, used_intfs, key='dst')
+ used_intfs.add(dst)
+ facts.update(dst_lag_facts)
+
+ if src is None or dst is None:
+ # We didn't find 2 lags, lets check up interfaces
+ port_status = duthost.show_interface(command='status')['ansible_facts']
+ if src is None:
+ src, src_port_facts = get_port_facts(duthost, mg_facts, port_status, switch_arptable, used_intfs, key='src')
+ used_intfs.add(src)
+ facts.update(src_port_facts)
+
+ if dst is None:
+ dst, dst_port_facts = get_port_facts(duthost, mg_facts, port_status, switch_arptable, used_intfs, key='dst')
+ facts.update(dst_port_facts)
+
+ if src is None or dst is None:
+        pytest.fail("Did not find 2 lag or interfaces that are up on host {}".format(duthost.hostname))
+ logger.info("gathered_new_facts={}".format(json.dumps(facts, indent=2)))
+
+ yield facts
diff --git a/tests/ipfwd/test_dip_sip.py b/tests/ipfwd/test_dip_sip.py
index 0301b87ae36..6c243db783b 100644
--- a/tests/ipfwd/test_dip_sip.py
+++ b/tests/ipfwd/test_dip_sip.py
@@ -1,8 +1,6 @@
import pytest
import ptf.testutils as testutils
-from ipaddress import ip_address
import logging
-import json
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import remove_ip_addresses # lgtm[py/unused-import]
@@ -10,13 +8,12 @@
DEFAULT_HLIM_TTL = 64
WAIT_EXPECTED_PACKET_TIMEOUT = 5
+logger = logging.getLogger(__name__)
+
pytestmark = [
pytest.mark.topology('t0', 't1', 't2')
]
-logger = logging.getLogger(__name__)
-
-
@pytest.fixture(scope="module", autouse="True")
def lldp_setup(duthosts, enum_rand_one_per_hwsku_frontend_hostname, patch_lldpctl, unpatch_lldpctl, localhost):
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
@@ -24,105 +21,6 @@ def lldp_setup(duthosts, enum_rand_one_per_hwsku_frontend_hostname, patch_lldpct
yield
unpatch_lldpctl(localhost, duthost)
-
-def lag_facts(dut, mg_facts):
- facts = {}
-
- if not mg_facts['minigraph_portchannels']:
- pytest.fail("minigraph_portchannels is not defined")
-
- # minigraph facts
- src_lag = mg_facts['minigraph_portchannel_interfaces'][2]['attachto']
- dst_lag = mg_facts['minigraph_portchannel_interfaces'][0]['attachto']
- logger.info("src_lag is {}, dst_lag is {}".format(src_lag, dst_lag))
-
- # lldp facts
- lldp_facts = dut.lldp()['ansible_facts']['lldp']
- facts['dst_host_mac'] = lldp_facts[mg_facts['minigraph_portchannels'][dst_lag]['members'][0]]['chassis']['mac']
- facts['src_host_mac'] = lldp_facts[mg_facts['minigraph_portchannels'][src_lag]['members'][0]]['chassis']['mac']
-
- facts['dst_router_mac'] = dut.facts['router_mac']
- facts['src_router_mac'] = dut.facts['router_mac']
-
- for intf in mg_facts['minigraph_portchannel_interfaces']:
- if intf['attachto'] == dst_lag:
- addr = ip_address(unicode(intf['addr']))
- if addr.version == 4:
- facts['dst_router_ipv4'] = intf['addr']
- facts['dst_host_ipv4'] = intf['peer_addr']
- elif addr.version == 6:
- facts['dst_router_ipv6'] = intf['addr']
- facts['dst_host_ipv6'] = intf['peer_addr']
-
- facts['dst_port_ids'] = []
- for intf in mg_facts['minigraph_portchannels'][dst_lag]['members']:
- facts['dst_port_ids'].append(mg_facts['minigraph_ptf_indices'][intf])
-
- facts['src_port_ids'] = []
- for intf in mg_facts['minigraph_portchannels'][src_lag]['members']:
- facts['src_port_ids'].append(mg_facts['minigraph_ptf_indices'][intf])
-
- return facts
-
-
-def port_facts(dut, mg_facts):
- facts = {}
-
- if not mg_facts['minigraph_interfaces']:
- pytest.fail("minigraph_interfaces is not defined.")
-
- # minigraph facts
- src_port = mg_facts['minigraph_interfaces'][2]['attachto']
- dst_port = mg_facts['minigraph_interfaces'][0]['attachto']
- logger.info("src_port is {}, dst_port is {}".format(src_port, dst_port))
-
- # lldp facts
- lldp_facts = dut.lldp()['ansible_facts']['lldp']
- facts['dst_host_mac'] = lldp_facts[dst_port]['chassis']['mac']
- facts['src_host_mac'] = lldp_facts[src_port]['chassis']['mac']
-
- facts['dst_router_mac'] = dut.facts['router_mac']
- facts['src_router_mac'] = dut.facts['router_mac']
-
- for intf in mg_facts['minigraph_interfaces']:
- if intf['attachto'] == dst_port:
- addr = ip_address(unicode(intf['addr']))
- if addr.version == 4:
- facts['dst_router_ipv4'] = intf['addr']
- facts['dst_host_ipv4'] = intf['peer_addr']
- elif addr.version == 6:
- facts['dst_router_ipv6'] = intf['addr']
- facts['dst_host_ipv6'] = intf['peer_addr']
-
- facts['dst_port_ids'] = [mg_facts['minigraph_ptf_indices'][dst_port]]
- facts['src_port_ids'] = [mg_facts['minigraph_ptf_indices'][src_port]]
-
- return facts
-
-
-@pytest.fixture(scope='function')
-def gather_facts(tbinfo, duthosts, enum_rand_one_per_hwsku_frontend_hostname):
- duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
- facts = {}
-
- topo_type = tbinfo['topo']['type']
- if topo_type not in ('t0', 't1', 't2'):
- pytest.skip("Unsupported topology")
-
- logger.info("Gathering facts on DUT ...")
- mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
-
- # if minigraph_portchannel_interfaces is not empty - topology with lag
- if mg_facts['minigraph_portchannel_interfaces']:
- facts = lag_facts(duthost, mg_facts)
- else:
- facts = port_facts(duthost, mg_facts)
-
- logger.info("gathered_facts={}".format(json.dumps(facts, indent=2)))
-
- yield facts
-
-
def run_test_ipv6(ptfadapter, facts):
logger.info("Running test with ipv6 packets")
@@ -183,7 +81,11 @@ def run_test_ipv4(ptfadapter, facts):
testutils.verify_packet_any_port(ptfadapter, exp_pkt, facts['dst_port_ids'], timeout=WAIT_EXPECTED_PACKET_TIMEOUT)
-def test_dip_sip(ptfadapter, gather_facts):
+def test_dip_sip(tbinfo, ptfadapter, gather_facts):
+ topo_type = tbinfo['topo']['type']
+ if topo_type not in ('t0', 't1', 't2'):
+ pytest.skip("Unsupported topology")
+
ptfadapter.reinit()
run_test_ipv4(ptfadapter, gather_facts)
run_test_ipv6(ptfadapter, gather_facts)
diff --git a/tests/ipfwd/test_dir_bcast.py b/tests/ipfwd/test_dir_bcast.py
index 4976d466ff5..0a33a6ebd96 100644
--- a/tests/ipfwd/test_dir_bcast.py
+++ b/tests/ipfwd/test_dir_bcast.py
@@ -8,7 +8,7 @@
pytest.mark.topology('t0')
]
-def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo, fib):
+def test_dir_bcast(duthosts, rand_one_dut_hostname, ptfhost, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
support_testbed_types = frozenset(['t0', 't0-16', 't0-52', 't0-56', 't0-64', 't0-64-32', 't0-116'])
testbed_type = tbinfo['topo']['name']
diff --git a/tests/ipfwd/test_mtu.py b/tests/ipfwd/test_mtu.py
index 2dcd385234e..487b0746d87 100644
--- a/tests/ipfwd/test_mtu.py
+++ b/tests/ipfwd/test_mtu.py
@@ -7,12 +7,16 @@
from datetime import datetime
pytestmark = [
- pytest.mark.topology('t1')
+ pytest.mark.topology('t1', 't2')
]
@pytest.mark.parametrize("mtu", [1514,9114])
-def test_mtu(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, mtu):
- duthost = duthosts[rand_one_dut_hostname]
+def test_mtu(tbinfo, duthosts, enum_rand_one_per_hwsku_frontend_hostname, ptfhost, mtu, gather_facts):
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+
+ topo_type = tbinfo['topo']['type']
+ if topo_type not in ('t1', 't2'):
+ pytest.skip("Unsupported topology")
testbed_type = tbinfo['topo']['name']
router_mac = duthost.shell('sonic-cfggen -d -v \'DEVICE_METADATA.localhost.mac\'')["stdout_lines"][0].decode("utf-8")
@@ -27,6 +31,12 @@ def test_mtu(tbinfo, duthosts, rand_one_dut_hostname, ptfhost, mtu):
platform_dir="ptftests",
params={"testbed_type": testbed_type,
"router_mac": router_mac,
- "testbed_mtu": mtu },
+ "testbed_mtu": mtu,
+ "src_host_ip": gather_facts['src_host_ipv4'],
+ "src_router_ip": gather_facts['src_router_ipv4'],
+ "dst_host_ip": gather_facts['dst_host_ipv4'],
+ "src_ptf_port_list": gather_facts['src_port_ids'],
+ "dst_ptf_port_list": gather_facts['dst_port_ids']
+ },
log_file=log_file,
socket_recv_size=16384)
diff --git a/tests/ixia/ecn/files/helper.py b/tests/ixia/ecn/files/helper.py
index a252d77f02a..a57823899c0 100644
--- a/tests/ixia/ecn/files/helper.py
+++ b/tests/ixia/ecn/files/helper.py
@@ -308,6 +308,9 @@ def __run_traffic(api,
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump captured packets """
pcap_bytes = api.get_capture_results(CaptureRequest(port_name=capture_port_name))
with open(pcap_file_name, 'wb') as fid:
diff --git a/tests/ixia/pfc/files/helper.py b/tests/ixia/pfc/files/helper.py
index 5e4d49321f1..0b93e02e6a9 100644
--- a/tests/ixia/pfc/files/helper.py
+++ b/tests/ixia/pfc/files/helper.py
@@ -326,6 +326,9 @@ def __run_traffic(api,
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump per-flow statistics """
rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
api.set_state(State(FlowTransmitState(state='stop')))
@@ -350,7 +353,7 @@ def __verify_results(rows,
Args:
rows (list): per-flow statistics
duthost (Ansible host instance): device under test
- pause_flow_name: name of pause storm
+ pause_flow_name (str): name of pause storm
test_flow_name (str): name of test flows
bg_flow_name (str): name of background flows
test_flow_rate_percent (int): rate percentage for each test flow
diff --git a/tests/ixia/pfcwd/files/pfcwd_basic_helper.py b/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
index 161ba4debae..fb9d309d3ba 100644
--- a/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
+++ b/tests/ixia/pfcwd/files/pfcwd_basic_helper.py
@@ -275,6 +275,9 @@ def __run_traffic(api, config, all_flow_names, exp_dur_sec):
time.sleep(1)
attempts += 1
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
""" Dump per-flow statistics """
rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
api.set_state(State(FlowTransmitState(state='stop')))
diff --git a/tests/ixia/pfcwd/files/pfcwd_multi_node_helper.py b/tests/ixia/pfcwd/files/pfcwd_multi_node_helper.py
new file mode 100644
index 00000000000..186d34d03bf
--- /dev/null
+++ b/tests/ixia/pfcwd/files/pfcwd_multi_node_helper.py
@@ -0,0 +1,676 @@
+import time
+from math import ceil
+from itertools import permutations
+
+from tests.common.helpers.assertions import pytest_assert, pytest_require
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
+ fanout_graph_facts
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api
+from tests.common.ixia.ixia_helpers import get_dut_port_id
+from tests.common.ixia.common_helpers import pfc_class_enable_vector,\
+ start_pfcwd, enable_packet_aging, get_pfcwd_poll_interval, get_pfcwd_detect_time
+
+from abstract_open_traffic_generator.flow import DeviceTxRx, TxRx, Flow, Header,\
+ Size, Rate, Duration, FixedSeconds, FixedPackets, PortTxRx, PfcPause
+from abstract_open_traffic_generator.flow_ipv4 import Priority, Dscp
+from abstract_open_traffic_generator.flow import Pattern as FieldPattern
+from abstract_open_traffic_generator.flow import Ipv4 as Ipv4Header
+from abstract_open_traffic_generator.flow import Ethernet as EthernetHeader
+from abstract_open_traffic_generator.control import State, ConfigState, FlowTransmitState
+from abstract_open_traffic_generator.result import FlowRequest
+
+PAUSE_FLOW_NAME = 'Pause Storm'
+TEST_FLOW_NAME = 'Test Flow'
+TEST_FLOW_AGGR_RATE_PERCENT = 45
+BG_FLOW_NAME = 'Background Flow'
+BG_FLOW_AGGR_RATE_PERCENT = 45
+DATA_PKT_SIZE = 1024
+IXIA_POLL_DELAY_SEC = 2
+TOLERANCE_THRESHOLD = 0.05
+
+def run_pfcwd_multi_node_test(api,
+ testbed_config,
+ conn_data,
+ fanout_data,
+ duthost,
+ dut_port,
+ pause_prio_list,
+ test_prio_list,
+ bg_prio_list,
+ prio_dscp_map,
+ trigger_pfcwd,
+ pattern):
+ """
+    Run PFC watchdog test in a multi-node (>=3) topology
+
+ Args:
+ api (obj): IXIA session
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ conn_data (dict): the dictionary returned by conn_graph_fact.
+ fanout_data (dict): the dictionary returned by fanout_graph_fact.
+ duthost (Ansible host instance): device under test
+ dut_port (str): DUT port to test
+ pause_prio_list (list): priorities to pause for PFC pause storm
+ test_prio_list (list): priorities of test flows
+ bg_prio_list (list): priorities of background flows
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+ trigger_pfcwd (bool): if PFC watchdog is expected to be triggered
+ pattern (str): traffic pattern
+ Returns:
+ N/A
+ """
+ patterns = ['all to all', '2 sender 2 receiver']
+ pytest_assert(pattern in patterns,
+ 'Unknown pattern {}. We only support {}'.format(pattern, patterns))
+
+ pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config')
+
+ num_devices = len(testbed_config.devices)
+ pytest_require(num_devices >= 3, "This test requires at least 3 hosts")
+
+ start_pfcwd(duthost)
+ enable_packet_aging(duthost)
+
+ """ Get the ID of the port to test """
+ port_id = get_dut_port_id(dut_hostname=duthost.hostname,
+ dut_port=dut_port,
+ conn_data=conn_data,
+ fanout_data=fanout_data)
+
+ pytest_assert(port_id is not None,
+ 'Fail to get ID for port {}'.format(dut_port))
+
+ poll_interval_sec = get_pfcwd_poll_interval(duthost) / 1000.0
+ detect_time_sec = get_pfcwd_detect_time(host_ans=duthost, intf=dut_port) / 1000.0
+
+ if trigger_pfcwd:
+ pfc_storm_dur_sec = poll_interval_sec + detect_time_sec
+ else:
+ pfc_storm_dur_sec = 0.5 * detect_time_sec
+
+ exp_dur_sec = ceil(pfc_storm_dur_sec + 1)
+
+ """ Generate traffic config """
+ if pattern == "all to all":
+ test_flow_rate_percent = int(TEST_FLOW_AGGR_RATE_PERCENT / \
+ (num_devices - 1) / \
+ len(test_prio_list))
+
+ bg_flow_rate_percent = int(BG_FLOW_AGGR_RATE_PERCENT / \
+ (num_devices - 1) / \
+ len(bg_prio_list))
+
+ flows = __gen_a2a_traffic(testbed_config=testbed_config,
+ port_id=port_id,
+ pause_flow_name=PAUSE_FLOW_NAME,
+ pause_prio_list=pause_prio_list,
+ test_flow_name=TEST_FLOW_NAME,
+ test_flow_prio_list=test_prio_list,
+ test_flow_rate_percent=test_flow_rate_percent,
+ bg_flow_name=BG_FLOW_NAME,
+ bg_flow_prio_list=bg_prio_list,
+ bg_flow_rate_percent=bg_flow_rate_percent,
+ data_flow_dur_sec=exp_dur_sec,
+ pfc_storm_dur_sec=pfc_storm_dur_sec,
+ data_pkt_size=DATA_PKT_SIZE,
+ prio_dscp_map=prio_dscp_map)
+
+ elif pattern == "2 sender 2 receiver":
+ test_flow_rate_percent = int(TEST_FLOW_AGGR_RATE_PERCENT / \
+ 2.0 / \
+ len(test_prio_list))
+ bg_flow_rate_percent = int(BG_FLOW_AGGR_RATE_PERCENT / \
+ 2.0 / \
+ len(bg_prio_list))
+
+ flows = __gen_2sender_2receiver_traffic(testbed_config=testbed_config,
+ port_id=port_id,
+ pause_flow_name=PAUSE_FLOW_NAME,
+ pause_prio_list=pause_prio_list,
+ test_flow_name=TEST_FLOW_NAME,
+ test_flow_prio_list=test_prio_list,
+ test_flow_rate_percent=test_flow_rate_percent,
+ bg_flow_name=BG_FLOW_NAME,
+ bg_flow_prio_list=bg_prio_list,
+ bg_flow_rate_percent=bg_flow_rate_percent,
+ data_flow_dur_sec=exp_dur_sec,
+ pfc_storm_dur_sec=pfc_storm_dur_sec,
+ data_pkt_size=DATA_PKT_SIZE,
+ prio_dscp_map=prio_dscp_map)
+
+
+ """ Tgen config = testbed config + flow config """
+ config = testbed_config
+ config.flows = flows
+
+ all_flow_names = [flow.name for flow in flows]
+
+ flow_stats = __run_traffic(api=api,
+ config=config,
+ all_flow_names=all_flow_names,
+ exp_dur_sec=exp_dur_sec)
+
+ speed_str = config.layer1[0].speed
+ speed_gbps = int(speed_str.split('_')[1])
+
+ __verify_results(rows=flow_stats,
+ speed_gbps=speed_gbps,
+ pause_flow_name=PAUSE_FLOW_NAME,
+ test_flow_name=TEST_FLOW_NAME,
+ bg_flow_name=BG_FLOW_NAME,
+ test_flow_rate_percent=test_flow_rate_percent,
+ bg_flow_rate_percent=bg_flow_rate_percent,
+ data_flow_dur_sec=exp_dur_sec,
+ data_pkt_size=DATA_PKT_SIZE,
+ trigger_pfcwd=trigger_pfcwd,
+ pause_port_id=port_id,
+ tolerance=TOLERANCE_THRESHOLD)
+
+def __data_flow_name(name_prefix, src_id, dst_id, prio):
+ """
+ Generate name for a data flow
+
+ Args:
+ name_prefix (str): name prefix
+ src_id (int): ID of the source port
+ dst_id (int): ID of the destination port
+ prio (int): priority of the flow
+
+ Returns:
+ Name of the flow (str)
+ """
+ return "{} {} -> {} Prio {}".format(name_prefix, src_id, dst_id, prio)
+
+def __data_flow_src(flow_name):
+ """
+ Get the source ID from the data flow's name
+
+ Args:
+ flow_name (str): name of the data flow
+
+ Returns:
+ ID of the source port (str)
+ """
+ words = flow_name.split()
+ index = words.index('->')
+ return int(words[index-1])
+
+def __data_flow_dst(flow_name):
+ """
+ Get the destination ID from the data flow's name
+
+ Args:
+ flow_name (str): name of the data flow
+
+ Returns:
+ ID of the destination port (str)
+ """
+ words = flow_name.split()
+ index = words.index('->')
+ return int(words[index+1])
+
+def __gen_2sender_2receiver_traffic(testbed_config,
+ port_id,
+ pause_flow_name,
+ pause_prio_list,
+ test_flow_name,
+ test_flow_prio_list,
+ test_flow_rate_percent,
+ bg_flow_name,
+ bg_flow_prio_list,
+ bg_flow_rate_percent,
+ data_flow_dur_sec,
+ pfc_storm_dur_sec,
+ data_pkt_size,
+ prio_dscp_map):
+ """
+ Generate configurations of flows under 2 sender 2 receiver traffic pattern,
+ including test flows, background flows and pause storm. Test flows and
+ background flows are also known as data flows.
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ port_id (int): ID of DUT port to test.
+ pause_flow_name (str): name of pause storm
+ pause_prio_list (list): priorities to pause for PFC frames
+ test_flow_name (str): name prefix of test flows
+ test_prio_list (list): priorities of test flows
+ test_flow_rate_percent (int): rate percentage for each test flow
+ bg_flow_name (str): name prefix of background flows
+ bg_prio_list (list): priorities of background flows
+ bg_flow_rate_percent (int): rate percentage for each background flow
+ data_flow_dur_sec (int): duration of data flows in second
+ pfc_storm_dur_sec (float): duration of the pause storm in second
+ data_pkt_size (int): packet size of data flows in byte
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flows configurations (list)
+ """
+ result = list()
+
+ """ Generate a PFC pause storm """
+ pause_port_id = port_id
+ pause_flow = __gen_pause_flow(testbed_config=testbed_config,
+ src_port_id=pause_port_id,
+ flow_name=pause_flow_name,
+ pause_prio_list=pause_prio_list,
+ flow_dur_sec=pfc_storm_dur_sec)
+
+ result.append(pause_flow)
+
+ """
+    Generate bi-directional data flows between [port_id+2] and
+ [port_id, port_id+1]
+ """
+ one_port_id_list = [(port_id + 2) % len(testbed_config.devices)]
+ two_port_id_list = [port_id, (port_id + 1) % len(testbed_config.devices)]
+
+ perm = permutations([one_port_id_list, two_port_id_list])
+
+ for src_port_id_list, dst_port_id_list in list(perm):
+ test_flows = __gen_data_flows(testbed_config=testbed_config,
+ src_port_id_list=src_port_id_list,
+ dst_port_id_list=dst_port_id_list,
+ flow_name_prefix=TEST_FLOW_NAME,
+ flow_prio_list=test_flow_prio_list,
+ flow_rate_percent=test_flow_rate_percent,
+ flow_dur_sec=data_flow_dur_sec,
+ data_pkt_size=data_pkt_size,
+ prio_dscp_map=prio_dscp_map)
+
+ result.extend(test_flows)
+
+ bg_flows = __gen_data_flows(testbed_config=testbed_config,
+ src_port_id_list=src_port_id_list,
+ dst_port_id_list=dst_port_id_list,
+ flow_name_prefix=BG_FLOW_NAME,
+ flow_prio_list=bg_flow_prio_list,
+ flow_rate_percent=bg_flow_rate_percent,
+ flow_dur_sec=data_flow_dur_sec,
+ data_pkt_size=data_pkt_size,
+ prio_dscp_map=prio_dscp_map)
+
+ result.extend(bg_flows)
+
+ return result
+
+def __gen_a2a_traffic(testbed_config,
+ port_id,
+ pause_flow_name,
+ pause_prio_list,
+ test_flow_name,
+ test_flow_prio_list,
+ test_flow_rate_percent,
+ bg_flow_name,
+ bg_flow_prio_list,
+ bg_flow_rate_percent,
+ data_flow_dur_sec,
+ pfc_storm_dur_sec,
+ data_pkt_size,
+ prio_dscp_map):
+ """
+ Generate configurations of flows under all to all traffic pattern, including
+ test flows, background flows and pause storm. Test flows and background flows
+ are also known as data flows.
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ port_id (int): ID of DUT port to test.
+ pause_flow_name (str): name of pause storm
+ pause_prio_list (list): priorities to pause for PFC frames
+ test_flow_name (str): name prefix of test flows
+ test_prio_list (list): priorities of test flows
+ test_flow_rate_percent (int): rate percentage for each test flow
+ bg_flow_name (str): name prefix of background flows
+ bg_prio_list (list): priorities of background flows
+ bg_flow_rate_percent (int): rate percentage for each background flow
+ data_flow_dur_sec (int): duration of data flows in second
+ pfc_storm_dur_sec (float): duration of the pause storm in second
+ data_pkt_size (int): packet size of data flows in byte
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flows configurations (list)
+ """
+ result = list()
+
+ """ Generate a PFC pause storm """
+ pause_port_id = port_id
+ pause_flow = __gen_pause_flow(testbed_config=testbed_config,
+ src_port_id=pause_port_id,
+ flow_name=pause_flow_name,
+ pause_prio_list=pause_prio_list,
+ flow_dur_sec=pfc_storm_dur_sec)
+
+ result.append(pause_flow)
+
+ """ Generate all-to-all traffic pattern """
+ num_devices = len(testbed_config.devices)
+
+ for src_port_id in range(num_devices):
+ src_port_id_list = [src_port_id]
+ dst_port_id_list =[i for i in range(num_devices) if i != src_port_id]
+
+ test_flows = __gen_data_flows(testbed_config=testbed_config,
+ src_port_id_list=src_port_id_list,
+ dst_port_id_list=dst_port_id_list,
+ flow_name_prefix=TEST_FLOW_NAME,
+ flow_prio_list=test_flow_prio_list,
+ flow_rate_percent=test_flow_rate_percent,
+ flow_dur_sec=data_flow_dur_sec,
+ data_pkt_size=data_pkt_size,
+ prio_dscp_map=prio_dscp_map)
+
+ result.extend(test_flows)
+
+ bg_flows = __gen_data_flows(testbed_config=testbed_config,
+ src_port_id_list=src_port_id_list,
+ dst_port_id_list=dst_port_id_list,
+ flow_name_prefix=BG_FLOW_NAME,
+ flow_prio_list=bg_flow_prio_list,
+ flow_rate_percent=bg_flow_rate_percent,
+ flow_dur_sec=data_flow_dur_sec,
+ data_pkt_size=data_pkt_size,
+ prio_dscp_map=prio_dscp_map)
+
+ result.extend(bg_flows)
+
+ return result
+
+def __gen_data_flows(testbed_config,
+ src_port_id_list,
+ dst_port_id_list,
+ flow_name_prefix,
+ flow_prio_list,
+ flow_rate_percent,
+ flow_dur_sec,
+ data_pkt_size,
+ prio_dscp_map):
+ """
+ Generate the configuration for data flows
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ src_port_id_list (list): IDs of source ports
+ dst_port_id_list (list): IDs of destination ports
+ flow_name_prefix (str): prefix of flows' names
+ flow_prio_list (list): priorities of data flows
+ flow_rate_percent (int): rate percentage for each flow
+ flow_dur_sec (int): duration of each flow in second
+ data_pkt_size (int): packet size of data flows in byte
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flows configurations (list): the list should have configurations of
+ len(src_port_id_list) * len(dst_port_id_list) * len(flow_prio_list)
+ data flows
+ """
+ flows = []
+
+ for src_port_id in src_port_id_list:
+ for dst_port_id in dst_port_id_list:
+ for prio in flow_prio_list:
+ flow = __gen_data_flow(testbed_config=testbed_config,
+ src_port_id=src_port_id,
+ dst_port_id=dst_port_id,
+ flow_name_prefix=flow_name_prefix,
+ flow_prio=prio,
+ flow_rate_percent=flow_rate_percent,
+ flow_dur_sec=flow_dur_sec,
+ data_pkt_size=data_pkt_size,
+ prio_dscp_map=prio_dscp_map)
+ flows.append(flow)
+
+ return flows
+
+def __gen_data_flow(testbed_config,
+ src_port_id,
+ dst_port_id,
+ flow_name_prefix,
+ flow_prio,
+ flow_rate_percent,
+ flow_dur_sec,
+ data_pkt_size,
+ prio_dscp_map):
+ """
+ Generate the configuration for a data flow
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ src_port_id (int): ID of the source port
+ dst_port_id (int): ID of destination port
+ flow_name_prefix (str): prefix of flow' name
+ flow_prio_list (list): priorities of the flow
+ flow_rate_percent (int): rate percentage for the flow
+ flow_dur_sec (int): duration of the flow in second
+ data_pkt_size (int): packet size of the flow in byte
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flow configuration (obj): including name, packet format, rate, ...
+ """
+ data_endpoint = DeviceTxRx(
+ tx_device_names=[testbed_config.devices[src_port_id].name],
+ rx_device_names=[testbed_config.devices[dst_port_id].name],
+ )
+
+ ip_prio = Priority(Dscp(phb=FieldPattern(choice=prio_dscp_map[flow_prio]),
+ ecn=FieldPattern(choice=Dscp.ECN_CAPABLE_TRANSPORT_1)))
+
+ pfc_queue = FieldPattern([flow_prio])
+
+ flow_name = __data_flow_name(name_prefix=flow_name_prefix,
+ src_id=src_port_id,
+ dst_id=dst_port_id,
+ prio=flow_prio)
+
+ flow = Flow(
+ name=flow_name,
+ tx_rx=TxRx(data_endpoint),
+ packet=[
+ Header(choice=EthernetHeader(pfc_queue=pfc_queue)),
+ Header(choice=Ipv4Header(priority=ip_prio))
+ ],
+ size=Size(data_pkt_size),
+ rate=Rate('line', flow_rate_percent),
+ duration=Duration(FixedSeconds(seconds=flow_dur_sec))
+ )
+
+ return flow
+
+def __gen_pause_flow(testbed_config,
+ src_port_id,
+ flow_name,
+ pause_prio_list,
+ flow_dur_sec):
+ """
+ Generate the configuration for a PFC pause storm
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ src_port_id (int): ID of the source port
+ flow_name (str): flow' name
+ pause_prio_list (list): priorities to pause for PFC frames
+ flow_dur_sec (float): duration of the flow in second
+
+ Returns:
+ flow configuration (obj): including name, packet format, rate, ...
+ """
+ pause_time = []
+ for x in range(8):
+ if x in pause_prio_list:
+ pause_time.append('ffff')
+ else:
+ pause_time.append('0000')
+
+ vector = pfc_class_enable_vector(pause_prio_list)
+
+ pause_pkt = Header(PfcPause(
+ dst=FieldPattern(choice='01:80:C2:00:00:01'),
+ src=FieldPattern(choice='00:00:fa:ce:fa:ce'),
+ class_enable_vector=FieldPattern(choice=vector),
+ pause_class_0=FieldPattern(choice=pause_time[0]),
+ pause_class_1=FieldPattern(choice=pause_time[1]),
+ pause_class_2=FieldPattern(choice=pause_time[2]),
+ pause_class_3=FieldPattern(choice=pause_time[3]),
+ pause_class_4=FieldPattern(choice=pause_time[4]),
+ pause_class_5=FieldPattern(choice=pause_time[5]),
+ pause_class_6=FieldPattern(choice=pause_time[6]),
+ pause_class_7=FieldPattern(choice=pause_time[7]),
+ ))
+
+
+ dst_port_id = (src_port_id + 1) % len(testbed_config.devices)
+ pause_src_point = PortTxRx(tx_port_name=testbed_config.ports[src_port_id].name,
+ rx_port_name=testbed_config.ports[dst_port_id].name)
+
+ """
+ The minimal fixed time duration in IXIA is 1 second.
+ To support smaller durations, we need to use # of packets
+ """
+ speed_str = testbed_config.layer1[0].speed
+ speed_gbps = int(speed_str.split('_')[1])
+ pause_dur = 65535 * 64 * 8.0 / (speed_gbps * 1e9)
+ pps = int(2 / pause_dur)
+ pkt_cnt = pps * flow_dur_sec
+
+ pause_flow = Flow(
+ name=flow_name,
+ tx_rx=TxRx(pause_src_point),
+ packet=[pause_pkt],
+ size=Size(64),
+ rate=Rate('pps', value=pps),
+ duration=Duration(FixedPackets(packets=pkt_cnt, delay=0))
+ )
+
+ return pause_flow
+
+def __run_traffic(api, config, all_flow_names, exp_dur_sec):
+ """
+ Run traffic and dump per-flow statistics
+
+ Args:
+ api (obj): IXIA session
+ config (obj): experiment config (testbed config + flow config)
+ all_flow_names (list): list of names of all the flows
+ exp_dur_sec (int): experiment duration in second
+
+ Returns:
+ per-flow statistics (list)
+ """
+ api.set_state(State(ConfigState(config=config, state='set')))
+ api.set_state(State(FlowTransmitState(state='start')))
+ time.sleep(exp_dur_sec)
+
+ attempts = 0
+ max_attempts = 20
+
+ while attempts < max_attempts:
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+
+ """ If all the data flows have stopped """
+ transmit_states = [row['transmit'] for row in rows]
+ if len(rows) == len(all_flow_names) and\
+ list(set(transmit_states)) == ['stopped']:
+ time.sleep(IXIA_POLL_DELAY_SEC)
+ break
+ else:
+ time.sleep(1)
+ attempts += 1
+
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
+ """ Dump per-flow statistics """
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+ api.set_state(State(FlowTransmitState(state='stop')))
+
+ return rows
+
+def __verify_results(rows,
+ speed_gbps,
+ pause_flow_name,
+ test_flow_name,
+ bg_flow_name,
+ test_flow_rate_percent,
+ bg_flow_rate_percent,
+ data_flow_dur_sec,
+ data_pkt_size,
+ trigger_pfcwd,
+ pause_port_id,
+ tolerance):
+ """
+ Verify if we get expected experiment results
+
+ Args:
+ rows (list): per-flow statistics
+ speed_gbps (int): link speed in Gbps
+ pause_flow_name (str): name of pause storm
+ test_flow_name (str): name of test flows
+ bg_flow_name (str): name of background flows
+ test_flow_rate_percent (int): rate percentage for each test flow
+ bg_flow_rate_percent (int): rate percentage for each background flow
+ data_pkt_size (int): packet size of data flows in byte
+ test_flow_pause (bool): if test flows are expected to be paused
+ trigger_pfcwd (bool): if PFC watchdog is expected to be triggered
+ pause_port_id (int): ID of the port to send PFC pause frames
+ tolerance (float): maximum allowable deviation
+
+ Returns:
+ N/A
+ """
+ for row in rows:
+ flow_name = row['name']
+ tx_frames = row['frames_tx']
+ rx_frames = row['frames_rx']
+
+ if pause_flow_name in flow_name:
+ """ PFC pause storm """
+ pytest_assert(tx_frames > 0 and rx_frames == 0,
+ "All the PFC packets should be dropped")
+
+ elif bg_flow_name in flow_name:
+ """ Background flows """
+ pytest_assert(tx_frames == rx_frames,
+ '{} should not have any dropped packet'.format(flow_name))
+
+ exp_bg_flow_rx_pkts = bg_flow_rate_percent / 100.0 * speed_gbps \
+ * 1e9 * data_flow_dur_sec / 8.0 / data_pkt_size
+ deviation = (rx_frames - exp_bg_flow_rx_pkts) / float(exp_bg_flow_rx_pkts)
+ pytest_assert(abs(deviation) < tolerance,
+ '{} should receive {} packets (actual {})'.\
+ format(flow_name, exp_bg_flow_rx_pkts, rx_frames))
+
+ elif test_flow_name in flow_name:
+ """ Test flows """
+ src_port_id = __data_flow_src(flow_name)
+ dst_port_id = __data_flow_dst(flow_name)
+
+ exp_test_flow_rx_pkts = test_flow_rate_percent / 100.0 * speed_gbps \
+ * 1e9 * data_flow_dur_sec / 8.0 / data_pkt_size
+
+ if trigger_pfcwd and\
+ (src_port_id == pause_port_id or dst_port_id == pause_port_id):
+ """ Once PFC watchdog is triggered, it will impact bi-directional traffic """
+ pytest_assert(tx_frames > rx_frames,
+ '{} should have dropped packets'.format(flow_name))
+
+ elif not trigger_pfcwd and dst_port_id == pause_port_id:
+ """ This test flow is delayed by PFC storm """
+ pytest_assert(tx_frames == rx_frames,
+ '{} should not have any dropped packet'.format(flow_name))
+ pytest_assert(rx_frames < exp_test_flow_rx_pkts,
+                          '{} should receive less than {} packets (actual {})'.\
+ format(flow_name, exp_test_flow_rx_pkts, rx_frames))
+
+ else:
+ """ Otherwise, the test flow is not impacted by PFC storm """
+ pytest_assert(tx_frames == rx_frames,
+ '{} should not have any dropped packet'.format(flow_name))
+
+ deviation = (rx_frames - exp_test_flow_rx_pkts) / float(exp_test_flow_rx_pkts)
+ pytest_assert(abs(deviation) < tolerance,
+ '{} should receive {} packets (actual {})'.\
+ format(flow_name, exp_test_flow_rx_pkts, rx_frames))
diff --git a/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py b/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py
new file mode 100644
index 00000000000..a3c1260624e
--- /dev/null
+++ b/tests/ixia/pfcwd/files/pfcwd_runtime_traffic_helper.py
@@ -0,0 +1,226 @@
+import time
+
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api
+from tests.common.ixia.ixia_helpers import get_dut_port_id
+from tests.common.ixia.common_helpers import start_pfcwd, stop_pfcwd
+
+from abstract_open_traffic_generator.flow import DeviceTxRx, TxRx, Flow, Header,\
+ Size, Rate, Duration, FixedSeconds
+from abstract_open_traffic_generator.flow_ipv4 import Priority, Dscp
+from abstract_open_traffic_generator.flow import Pattern as FieldPattern
+from abstract_open_traffic_generator.flow import Ipv4 as Ipv4Header
+from abstract_open_traffic_generator.flow import Ethernet as EthernetHeader
+from abstract_open_traffic_generator.control import State, ConfigState,\
+ FlowTransmitState
+from abstract_open_traffic_generator.result import FlowRequest
+
+DATA_FLOW_NAME = "Data Flow"
+DATA_PKT_SIZE = 1024
+DATA_FLOW_DURATION_SEC = 15
+PFCWD_START_DELAY_SEC = 3
+IXIA_POLL_DELAY_SEC = 2
+TOLERANCE_THRESHOLD = 0.05
+
+def run_pfcwd_runtime_traffic_test(api,
+ testbed_config,
+ conn_data,
+ fanout_data,
+ duthost,
+ dut_port,
+ prio_list,
+ prio_dscp_map):
+ """
+ Test PFC watchdog's impact on runtime traffic
+
+ Args:
+ api (obj): IXIA session
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ conn_data (dict): the dictionary returned by conn_graph_fact.
+ fanout_data (dict): the dictionary returned by fanout_graph_fact.
+ duthost (Ansible host instance): device under test
+ dut_port (str): DUT port to test
+ prio_list (list): priorities of data flows
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ N/A
+ """
+ pytest_assert(testbed_config is not None, 'Fail to get L2/3 testbed config')
+
+ stop_pfcwd(duthost)
+
+ """ Get the ID of the port to test """
+ port_id = get_dut_port_id(dut_hostname=duthost.hostname,
+ dut_port=dut_port,
+ conn_data=conn_data,
+ fanout_data=fanout_data)
+
+ pytest_assert(port_id is not None,
+ 'Fail to get ID for port {}'.format(dut_port))
+
+ flows = __gen_traffic(testbed_config=testbed_config,
+ port_id=port_id,
+ data_flow_name=DATA_FLOW_NAME,
+ data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
+ data_pkt_size=DATA_PKT_SIZE,
+ prio_list=prio_list,
+ prio_dscp_map=prio_dscp_map)
+
+ """ Tgen config = testbed config + flow config """
+ config = testbed_config
+ config.flows = flows
+
+ all_flow_names = [flow.name for flow in flows]
+
+ flow_stats = __run_traffic(api=api,
+ config=config,
+ duthost=duthost,
+ all_flow_names=all_flow_names,
+ pfcwd_start_delay_sec=PFCWD_START_DELAY_SEC,
+ exp_dur_sec=DATA_FLOW_DURATION_SEC)
+
+ speed_str = config.layer1[0].speed
+ speed_gbps = int(speed_str.split('_')[1])
+
+ __verify_results(rows=flow_stats,
+ speed_gbps=speed_gbps,
+ data_flow_dur_sec=DATA_FLOW_DURATION_SEC,
+ data_pkt_size=DATA_PKT_SIZE,
+ tolerance=TOLERANCE_THRESHOLD)
+
+def __gen_traffic(testbed_config,
+ port_id,
+ data_flow_name,
+ data_flow_dur_sec,
+ data_pkt_size,
+ prio_list,
+ prio_dscp_map):
+ """
+ Generate configurations of flows
+
+ Args:
+ testbed_config (obj): L2/L3 config of a T0 testbed
+ port_id (int): ID of DUT port to test.
+ data_flow_name (str): data flow name
+ data_flow_dur_sec (int): duration of data flows in second
+ data_pkt_size (int): size of data packets in byte
+ prio_list (list): priorities of data flows
+ prio_dscp_map (dict): Priority vs. DSCP map (key = priority).
+
+ Returns:
+ flows configurations (list): the list should have configurations of
+ len(prio_list) data flows
+ """
+ rx_port_id = port_id
+ tx_port_id = (port_id + 1) % len(testbed_config.devices)
+
+ data_endpoint = DeviceTxRx(
+ tx_device_names=[testbed_config.devices[tx_port_id].name],
+ rx_device_names=[testbed_config.devices[rx_port_id].name],
+ )
+
+ result = list()
+ data_flow_rate_percent = int(100 / len(prio_list))
+
+ """ For each priority """
+ for prio in prio_list:
+ ip_prio = Priority(Dscp(phb=FieldPattern(choice=prio_dscp_map[prio]),
+ ecn=FieldPattern(choice=Dscp.ECN_CAPABLE_TRANSPORT_1)))
+ pfc_queue = FieldPattern([prio])
+
+ data_flow = Flow(
+ name='{} Prio {}'.format(data_flow_name, prio),
+ tx_rx=TxRx(data_endpoint),
+ packet=[
+ Header(choice=EthernetHeader(pfc_queue=pfc_queue)),
+ Header(choice=Ipv4Header(priority=ip_prio))
+ ],
+ size=Size(data_pkt_size),
+ rate=Rate('line', data_flow_rate_percent),
+ duration=Duration(FixedSeconds(seconds=data_flow_dur_sec))
+ )
+
+ result.append(data_flow)
+
+ return result
+
+def __run_traffic(api, config, duthost, all_flow_names, pfcwd_start_delay_sec, exp_dur_sec):
+ """
+ Start traffic at time 0 and enable PFC watchdog at pfcwd_start_delay_sec
+
+ Args:
+ api (obj): IXIA session
+ config (obj): experiment config (testbed config + flow config)
+ duthost (Ansible host instance): device under test
+ all_flow_names (list): list of names of all the flows
+ pfcwd_start_delay_sec (int): PFC watchdog start delay in second
+ exp_dur_sec (int): experiment duration in second
+
+ Returns:
+ per-flow statistics (list)
+ """
+
+ api.set_state(State(ConfigState(config=config, state='set')))
+ api.set_state(State(FlowTransmitState(state='start')))
+
+ time.sleep(pfcwd_start_delay_sec)
+ start_pfcwd(duthost)
+ time.sleep(exp_dur_sec - pfcwd_start_delay_sec)
+
+ attempts = 0
+ max_attempts = 20
+
+ while attempts < max_attempts:
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+ """ If all the flows have stopped """
+ transmit_states = [row['transmit'] for row in rows]
+ if len(rows) == len(all_flow_names) and\
+ list(set(transmit_states)) == ['stopped']:
+ time.sleep(IXIA_POLL_DELAY_SEC)
+ break
+ else:
+ time.sleep(1)
+ attempts += 1
+
+ pytest_assert(attempts < max_attempts,
+ "Flows do not stop in {} seconds".format(max_attempts))
+
+ """ Dump per-flow statistics """
+ rows = api.get_flow_results(FlowRequest(flow_names=all_flow_names))
+ api.set_state(State(FlowTransmitState(state='stop')))
+
+ return rows
+
+def __verify_results(rows, speed_gbps, data_flow_dur_sec, data_pkt_size, tolerance):
+ """
+ Verify if we get expected experiment results
+
+ Args:
+ rows (list): per-flow statistics
+ speed_gbps (int): link speed in Gbps
+ data_flow_dur_sec (int): duration of data flows in second
+ data_pkt_size (int): size of data packets in byte
+ tolerance (float): maximum allowable deviation
+
+ Returns:
+ N/A
+ """
+ data_flow_rate_percent = int(100 / len(rows))
+
+ for row in rows:
+ flow_name = row['name']
+ tx_frames = row['frames_tx']
+ rx_frames = row['frames_rx']
+
+ pytest_assert(tx_frames == rx_frames, "{} packets of {} are dropped".\
+ format(tx_frames-rx_frames, flow_name))
+
+ exp_rx_pkts = data_flow_rate_percent / 100.0 * speed_gbps \
+ * 1e9 * data_flow_dur_sec / 8.0 / data_pkt_size
+
+ deviation = (rx_frames - exp_rx_pkts) / float(exp_rx_pkts)
+ pytest_assert(abs(deviation) < tolerance,
+ "{} should receive {} packets (actual {})".\
+ format(flow_name, exp_rx_pkts, rx_frames))
diff --git a/tests/ixia/pfcwd/test_pfcwd_2sender_2receiver.py b/tests/ixia/pfcwd/test_pfcwd_2sender_2receiver.py
new file mode 100644
index 00000000000..377930d9413
--- /dev/null
+++ b/tests/ixia/pfcwd/test_pfcwd_2sender_2receiver.py
@@ -0,0 +1,66 @@
+import pytest
+
+from tests.common.helpers.assertions import pytest_require, pytest_assert
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
+ fanout_graph_facts
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
+from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list,\
+ lossless_prio_list, lossy_prio_list
+
+from files.pfcwd_multi_node_helper import run_pfcwd_multi_node_test
+
+@pytest.mark.topology("tgen")
+
+@pytest.mark.parametrize("trigger_pfcwd", [True, False])
+def test_pfcwd_2sender_2receiver(ixia_api,
+ ixia_testbed,
+ conn_graph_facts,
+ fanout_graph_facts,
+ duthosts,
+ rand_one_dut_hostname,
+ rand_one_dut_portname_oper_up,
+ rand_one_dut_lossless_prio,
+ lossy_prio_list,
+ prio_dscp_map,
+ trigger_pfcwd):
+
+ """
+ Run PFC watchdog test in a 3-host topology with 2 senders and 2 receivers
+
+ Args:
+ ixia_api (pytest fixture): IXIA session
+ ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
+ conn_graph_facts (pytest fixture): connection graph
+ fanout_graph_facts (pytest fixture): fanout graph
+ duthosts (pytest fixture): list of DUTs
+ rand_one_dut_hostname (str): hostname of DUT
+ rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
+ rand_one_dut_lossless_prio (str): lossless priority to test, e.g., 's6100-1|3'
+ lossy_prio_list (pytest fixture): list of lossy priorities
+ prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
+ trigger_pfcwd (bool): if PFC watchdog is expected to be triggered
+
+ Returns:
+ N/A
+ """
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
+ dut_hostname2, lossless_prio = rand_one_dut_lossless_prio.split('|')
+ pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
+ "Priority and port are not mapped to the expected DUT")
+
+ duthost = duthosts[rand_one_dut_hostname]
+ lossless_prio = int(lossless_prio)
+
+ run_pfcwd_multi_node_test(api=ixia_api,
+ testbed_config=ixia_testbed,
+ conn_data=conn_graph_facts,
+ fanout_data=fanout_graph_facts,
+ duthost=duthost,
+ dut_port=dut_port,
+ pause_prio_list=[lossless_prio],
+ test_prio_list=[lossless_prio],
+ bg_prio_list=lossy_prio_list,
+ prio_dscp_map=prio_dscp_map,
+ trigger_pfcwd=trigger_pfcwd,
+ pattern="2 sender 2 receiver")
diff --git a/tests/ixia/pfcwd/test_pfcwd_a2a.py b/tests/ixia/pfcwd/test_pfcwd_a2a.py
new file mode 100644
index 00000000000..e91097c9e11
--- /dev/null
+++ b/tests/ixia/pfcwd/test_pfcwd_a2a.py
@@ -0,0 +1,66 @@
+import pytest
+
+from tests.common.helpers.assertions import pytest_require, pytest_assert
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
+ fanout_graph_facts
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
+from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list,\
+ lossless_prio_list, lossy_prio_list
+
+from files.pfcwd_multi_node_helper import run_pfcwd_multi_node_test
+
+@pytest.mark.topology("tgen")
+
+@pytest.mark.parametrize("trigger_pfcwd", [True, False])
+def test_pfcwd_a2a(ixia_api,
+ ixia_testbed,
+ conn_graph_facts,
+ fanout_graph_facts,
+ duthosts,
+ rand_one_dut_hostname,
+ rand_one_dut_portname_oper_up,
+ rand_one_dut_lossless_prio,
+ lossy_prio_list,
+ prio_dscp_map,
+ trigger_pfcwd):
+
+ """
+ Run PFC watchdog test under all to all traffic pattern
+
+ Args:
+ ixia_api (pytest fixture): IXIA session
+ ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
+ conn_graph_facts (pytest fixture): connection graph
+ fanout_graph_facts (pytest fixture): fanout graph
+ duthosts (pytest fixture): list of DUTs
+ rand_one_dut_hostname (str): hostname of DUT
+ rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
+ rand_one_dut_lossless_prio (str): lossless priority to test, e.g., 's6100-1|3'
+ lossy_prio_list (pytest fixture): list of lossy priorities
+ prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
+ trigger_pfcwd (bool): if PFC watchdog is expected to be triggered
+
+ Returns:
+ N/A
+ """
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
+ dut_hostname2, lossless_prio = rand_one_dut_lossless_prio.split('|')
+ pytest_require(rand_one_dut_hostname == dut_hostname == dut_hostname2,
+ "Priority and port are not mapped to the expected DUT")
+
+ duthost = duthosts[rand_one_dut_hostname]
+ lossless_prio = int(lossless_prio)
+
+ run_pfcwd_multi_node_test(api=ixia_api,
+ testbed_config=ixia_testbed,
+ conn_data=conn_graph_facts,
+ fanout_data=fanout_graph_facts,
+ duthost=duthost,
+ dut_port=dut_port,
+ pause_prio_list=[lossless_prio],
+ test_prio_list=[lossless_prio],
+ bg_prio_list=lossy_prio_list,
+ prio_dscp_map=prio_dscp_map,
+ trigger_pfcwd=trigger_pfcwd,
+ pattern="all to all")
diff --git a/tests/ixia/pfcwd/test_pfcwd_basic.py b/tests/ixia/pfcwd/test_pfcwd_basic.py
index 6716f0017ae..0c4123f90ef 100644
--- a/tests/ixia/pfcwd/test_pfcwd_basic.py
+++ b/tests/ixia/pfcwd/test_pfcwd_basic.py
@@ -92,7 +92,7 @@ def test_pfcwd_basic_multi_lossless_prio(ixia_api,
Returns:
N/A
"""
- dut_hostname, dut_port = enum_dut_portname_oper_up.split('|')
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
pytest_require(rand_one_dut_hostname == dut_hostname,
"Port is not mapped to the expected DUT")
diff --git a/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py b/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py
new file mode 100644
index 00000000000..6c7063bb0ce
--- /dev/null
+++ b/tests/ixia/pfcwd/test_pfcwd_runtime_traffic.py
@@ -0,0 +1,53 @@
+import pytest
+
+from tests.common.helpers.assertions import pytest_require, pytest_assert
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts,\
+ fanout_graph_facts
+from tests.common.ixia.ixia_fixtures import ixia_api_serv_ip, ixia_api_serv_port,\
+ ixia_api_serv_user, ixia_api_serv_passwd, ixia_api, ixia_testbed
+from tests.common.ixia.qos_fixtures import prio_dscp_map, all_prio_list
+
+from files.pfcwd_runtime_traffic_helper import run_pfcwd_runtime_traffic_test
+
+@pytest.mark.topology("tgen")
+
+def test_pfcwd_runtime_traffic(ixia_api,
+ ixia_testbed,
+ conn_graph_facts,
+ fanout_graph_facts,
+ duthosts,
+ rand_one_dut_hostname,
+ rand_one_dut_portname_oper_up,
+ all_prio_list,
+ prio_dscp_map):
+ """
+ Test PFC watchdog's impact on runtime traffic
+
+ Args:
+ ixia_api (pytest fixture): IXIA session
+ ixia_testbed (pytest fixture): L2/L3 config of a T0 testbed
+ conn_graph_facts (pytest fixture): connection graph
+ fanout_graph_facts (pytest fixture): fanout graph
+ duthosts (pytest fixture): list of DUTs
+ rand_one_dut_hostname (str): hostname of DUT
+ rand_one_dut_portname_oper_up (str): port to test, e.g., 's6100-1|Ethernet0'
+ all_prio_list (pytest fixture): list of all the priorities
+ prio_dscp_map (pytest fixture): priority vs. DSCP map (key = priority)
+
+ Returns:
+ N/A
+ """
+ dut_hostname, dut_port = rand_one_dut_portname_oper_up.split('|')
+ pytest_require(rand_one_dut_hostname == dut_hostname,
+ "Port is not mapped to the expected DUT")
+
+ duthost = duthosts[rand_one_dut_hostname]
+
+ run_pfcwd_runtime_traffic_test(api=ixia_api,
+ testbed_config=ixia_testbed,
+ conn_data=conn_graph_facts,
+ fanout_data=fanout_graph_facts,
+ duthost=duthost,
+ dut_port=dut_port,
+ prio_list=all_prio_list,
+ prio_dscp_map=prio_dscp_map)
diff --git a/tests/kvmtest.sh b/tests/kvmtest.sh
index 4f74f9342d4..e75e12ea73b 100755
--- a/tests/kvmtest.sh
+++ b/tests/kvmtest.sh
@@ -97,6 +97,7 @@ test_t0() {
monit/test_monit_status.py \
platform_tests/test_advanced_reboot.py \
test_interfaces.py \
+ arp/test_arp_dualtor.py \
bgp/test_bgp_fact.py \
bgp/test_bgp_gr_helper.py \
bgp/test_bgp_speaker.py \
@@ -117,6 +118,7 @@ test_t0() {
snmp/test_snmp_pfc_counters.py \
snmp/test_snmp_queue.py \
snmp/test_snmp_loopback.py \
+ snmp/test_snmp_default_route.py \
syslog/test_syslog.py \
tacacs/test_rw_user.py \
tacacs/test_ro_user.py \
@@ -126,7 +128,8 @@ test_t0() {
test_procdockerstatsd.py \
iface_namingmode/test_iface_namingmode.py \
platform_tests/test_cpu_memory_usage.py \
- bgp/test_bgpmon.py"
+ bgp/test_bgpmon.py \
+ process_monitoring/test_critical_process_monitoring.py"
pushd $SONIC_MGMT_DIR/tests
./run_tests.sh $RUNTEST_CLI_COMMON_OPTS -c "$tests" -p logs/$tgname
@@ -162,7 +165,8 @@ test_t1_lag() {
lldp/test_lldp.py \
route/test_default_route.py \
platform_tests/test_cpu_memory_usage.py \
- bgp/test_bgpmon.py"
+ bgp/test_bgpmon.py \
+ process_monitoring/test_critical_process_monitoring.py"
pushd $SONIC_MGMT_DIR/tests
./run_tests.sh $RUNTEST_CLI_COMMON_OPTS -c "$tests" -p logs/$tgname
@@ -195,9 +199,6 @@ export ANSIBLE_LIBRARY=$SONIC_MGMT_DIR/ansible/library/
# workaround for issue https://github.com/Azure/sonic-mgmt/issues/1659
export ANSIBLE_KEEP_REMOTE_FILES=1
-# clear cache from previous test runs
-rm -rf $SONIC_MGMT_DIR/tests/_cache
-
# clear logs from previous test runs
rm -rf $SONIC_MGMT_DIR/tests/logs
mkdir -p $SONIC_MGMT_DIR/tests/logs
diff --git a/tests/nat/conftest.py b/tests/nat/conftest.py
index bed9dc6f838..cf240845d5f 100644
--- a/tests/nat/conftest.py
+++ b/tests/nat/conftest.py
@@ -1,5 +1,6 @@
import re
import copy
+import time
import pytest
@@ -26,6 +27,32 @@ def protocol_type(request):
return request.param
+def pytest_addoption(parser):
+ """
+ Adds options to pytest that are used by the NAT tests.
+ """
+ parser.addoption(
+ "--enable_nat_feature",
+ action="store_true",
+ default=False,
+ help="Enable NAT feature on DUT",
+ )
+
+
+@pytest.fixture(scope='module')
+def config_nat_feature_enabled(request, duthost):
+ """
+ Enable NAT feature if optional argument was provided
+ :param request: pytest request object
+ :param duthost: DUT host object
+ """
+ if request.config.getoption("--enable_nat_feature"):
+ feature_status, _ = duthost.get_feature_status()
+ if feature_status['nat'] == 'disabled':
+ duthost.shell("sudo config feature state nat enabled")
+ time.sleep(2)
+
+
@pytest.fixture(autouse=True)
def teardown(duthost):
"""
@@ -145,12 +172,16 @@ def nat_global_config(duthost):
@pytest.fixture(scope='module', autouse=True)
-def apply_global_nat_config(duthost):
+def apply_global_nat_config(duthost, config_nat_feature_enabled):
"""
applies DUT's global NAT configuration;
after test run cleanup DUT's NAT configration
:param duthost: DUT host object
"""
+ status, _ = duthost.get_feature_status()
+ if 'nat' not in status or status['nat'] == 'disabled':
+ pytest.skip('nat feature is not enabled with image version {}'.format(duthost.os_version))
+
nat_global_config(duthost)
yield
# reload config on teardown
diff --git a/tests/pc/test_lag_2.py b/tests/pc/test_lag_2.py
index 7173afbdd03..61d207be5d4 100644
--- a/tests/pc/test_lag_2.py
+++ b/tests/pc/test_lag_2.py
@@ -5,7 +5,6 @@
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.ptf_runner import ptf_runner
-from tests.common.devices import AnsibleHostBase
from tests.common.fixtures.conn_graph_facts import conn_graph_facts
from tests.common.utilities import wait_until
from tests.common.helpers.assertions import pytest_assert
diff --git a/tests/pc/test_po_cleanup.py b/tests/pc/test_po_cleanup.py
index 8d1b6fd60c5..2de57705b08 100644
--- a/tests/pc/test_po_cleanup.py
+++ b/tests/pc/test_po_cleanup.py
@@ -27,36 +27,38 @@ def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
]
loganalyzer[rand_one_dut_hostname].ignore_regex.extend(ignoreRegex)
expectRegex = [
- ".*teamd#teammgrd: :- cleanTeamProcesses.*",
- ".*teamd#teamsyncd: :- cleanTeamSync.*"
+ ".*teammgrd: :- cleanTeamProcesses.*",
+ ".*teamsyncd: :- cleanTeamSync.*"
]
loganalyzer[rand_one_dut_hostname].expect_regex.extend(expectRegex)
-
-def check_kernel_po_interface_cleaned(duthost):
- res = duthost.shell("ip link show | grep -c PortChannel", module_ignore_errors=True)["stdout_lines"][0].decode("utf-8")
+def check_kernel_po_interface_cleaned(duthost, asic_index):
+ namespace = duthost.get_namespace_from_asic_id(asic_index)
+ res = duthost.shell(duthost.get_linux_ip_cmd_for_namespace("ip link show | grep -c PortChannel", namespace),module_ignore_errors=True)["stdout_lines"][0].decode("utf-8")
return res == '0'
+@pytest.fixture(scope="module", autouse=True)
+def check_topo_and_restore(duthosts, rand_one_dut_hostname, tbinfo):
+
+ duthost = duthosts[rand_one_dut_hostname]
+ mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
+
+ if len(mg_facts['minigraph_portchannels'].keys()) == 0 and not duthost.is_multi_asic:
+ pytest.skip("Skip test due to there is no portchannel exists in current topology.")
+ yield
+ # Do config reload to restore everything back
+ logging.info("Reloading config..")
+ config_reload(duthost)
-def test_po_cleanup(duthosts, rand_one_dut_hostname, tbinfo):
+def test_po_cleanup(duthosts, rand_one_dut_hostname, enum_asic_index):
"""
test port channel are cleaned up correctly and teammgrd and teamsyncd process
handle SIGTERM gracefully
"""
duthost = duthosts[rand_one_dut_hostname]
- mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
-
- if len(mg_facts['minigraph_portchannels'].keys()) == 0:
- pytest.skip("Skip test due to there is no portchannel exists in current topology.")
-
- try:
- logging.info("Disable Teamd Feature")
- duthost.shell("sudo systemctl stop teamd")
- # Check if Linux Kernel Portchannel Interface teamdev are clean up
- if not wait_until(10, 1, check_kernel_po_interface_cleaned, duthost):
- fail_msg = "PortChannel interface still exists in kernel"
- pytest.fail(fail_msg)
- finally:
- # Do config reload to restor everything back
- logging.info("Reloading config..")
- config_reload(duthost)
+ logging.info("Disable swss/teamd Feature")
+ duthost.asic_instance(enum_asic_index).stop_service("swss")
+ # Check if Linux Kernel Portchannel Interface teamdev are clean up
+ if not wait_until(10, 1, check_kernel_po_interface_cleaned, duthost, enum_asic_index):
+ fail_msg = "PortChannel interface still exists in kernel"
+ pytest.fail(fail_msg)
diff --git a/tests/pfcwd/conftest.py b/tests/pfcwd/conftest.py
index 1d8ed9d936b..3ae0e3924e3 100644
--- a/tests/pfcwd/conftest.py
+++ b/tests/pfcwd/conftest.py
@@ -4,6 +4,7 @@
from tests.common.fixtures.conn_graph_facts import conn_graph_facts
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
+from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice
from .files.pfcwd_helper import TrafficPorts, set_pfc_timers, select_test_ports
logger = logging.getLogger(__name__)
@@ -27,6 +28,29 @@ def pytest_addoption(parser):
parser.addoption('--fake-storm', action='store', type=bool, default=True,
help='Fake storm for most ports instead of using pfc gen')
+@pytest.fixture(scope="module", autouse=True)
+def skip_pfcwd_test_dualtor(tbinfo):
+ if 'dualtor' in tbinfo['topo']['name']:
+ pytest.skip("Pfcwd tests skipped on dual tor testbed")
+
+ yield
+
+@pytest.fixture(scope="module")
+def fake_storm(request, duthosts, rand_one_dut_hostname):
+ """
+ Enable/disable fake storm based on platform and input parameters
+
+ Args:
+ request: pytest request object
+ duthosts: AnsibleHost instance for multi DUT
+ rand_one_dut_hostname: hostname of DUT
+
+ Returns:
+ fake_storm: False/True
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ return request.config.getoption('--fake-storm') if not isMellanoxDevice(duthost) else False
+
@pytest.fixture(scope="module")
def setup_pfc_test(duthosts, rand_one_dut_hostname, ptfhost, conn_graph_facts, tbinfo):
"""
diff --git a/tests/pfcwd/test_pfcwd_function.py b/tests/pfcwd/test_pfcwd_function.py
index 58dd34809b5..31da3c5200d 100644
--- a/tests/pfcwd/test_pfcwd_function.py
+++ b/tests/pfcwd/test_pfcwd_function.py
@@ -18,6 +18,7 @@
"drop": "Verify proper function of drop action",
"forward": "Verify proper function of forward action"
}
+MMU_ACTIONS = ['change', 'noop', 'restore', 'noop']
pytestmark = [
pytest.mark.disable_loganalyzer,
@@ -84,6 +85,39 @@ def get_queue_oid(dut, port, queue_num):
cmd = "redis-cli -n 2 HGET COUNTERS_QUEUE_NAME_MAP {}:{}".format(port, queue_num)
return dut.command(cmd)['stdout']
+ @staticmethod
+ def update_alpha(dut, profile, value):
+ """
+ Update dynamic threshold value in buffer profile
+
+ Args:
+ dut(AnsibleHost) : dut instance
+ profile(string) : profile name
+ value(int) : dynamic threshold value to update
+ """
+ logger.info("Updating dynamic threshold for {} to {}".format(profile, value))
+ cmd = "redis-cli -n 4 HSET {} dynamic_th {}".format(profile, value)
+ dut.command(cmd)
+
+ @staticmethod
+ def get_mmu_params(dut, port):
+ """
+        Retreive the pg profile for a port and the dynamic threshold used
+
+ Args:
+ dut(AnsibleHost) : dut instance
+ port(string) : port name
+
+ Returns:
+ pg_profile(string), alpha(string)
+ """
+        logger.info("Retrieving pg profile and dynamic threshold for port: {}".format(port))
+ cmd = "redis-cli -n 4 HGET BUFFER_PG|{}|3-4 profile".format(port)
+ pg_profile = dut.command(cmd)['stdout'][1:-1]
+ cmd = "redis-cli -n 4 HGET {} dynamic_th".format(pg_profile)
+ alpha = dut.command(cmd)['stdout']
+ return pg_profile, alpha
+
class PfcPktCntrs(object):
""" PFCwd counter retrieval and verifications """
def __init__(self, dut, action):
@@ -166,7 +200,7 @@ def verify_pkt_cnts(self, port_type, pkt_cnt):
class SetupPfcwdFunc(object):
""" Test setup per port """
- def setup_test_params(self, port, vlan, init=False):
+ def setup_test_params(self, port, vlan, init=False, mmu_params=False):
"""
Sets up test parameters associated with a DUT port
@@ -177,36 +211,82 @@ def setup_test_params(self, port, vlan, init=False):
"""
logger.info("--- Setting up test params for port {} ---".format(port))
self.setup_port_params(port, init=init)
+ if mmu_params:
+ self.setup_mmu_params(port)
self.resolve_arp(vlan)
if not self.pfc_wd['fake_storm']:
self.storm_setup(init=init)
def setup_port_params(self, port, init=False):
- """
- Gather all the parameters needed for storm generation and ptf test based off the DUT port
-
- Args:
- port(string) : DUT port
- """
- self.pfc_wd = dict()
- self.pfc_wd['fake_storm'] = False if init else self.fake_storm
- self.pfc_wd['test_pkt_count'] = 100
- self.pfc_wd['queue_index'] = 4
- self.pfc_wd['frames_number'] = 100000000
- self.pfc_wd['test_port_ids'] = list()
- self.peer_device = self.ports[port]['peer_device']
- self.pfc_wd['test_port'] = port
- self.pfc_wd['rx_port'] = self.ports[port]['rx_port']
- self.pfc_wd['test_neighbor_addr'] = self.ports[port]['test_neighbor_addr']
- self.pfc_wd['rx_neighbor_addr'] = self.ports[port]['rx_neighbor_addr']
- self.pfc_wd['test_port_id'] = self.ports[port]['test_port_id']
- self.pfc_wd['rx_port_id'] = self.ports[port]['rx_port_id']
- self.pfc_wd['port_type'] = self.ports[port]['test_port_type']
- if self.pfc_wd['port_type'] == "portchannel":
- self.pfc_wd['test_port_ids'] = self.ports[port]['test_portchannel_members']
- elif self.pfc_wd['port_type'] in ["vlan", "interface"]:
- self.pfc_wd['test_port_ids'] = self.pfc_wd['test_port_id']
- self.queue_oid = PfcCmd.get_queue_oid(self.dut, port, self.pfc_wd['queue_index'])
+ """
+ Gather all the parameters needed for storm generation and ptf test based off the DUT port
+
+ Args:
+ port(string) : DUT port
+ """
+ self.pfc_wd = dict()
+ self.pfc_wd['fake_storm'] = False if init else self.fake_storm
+ self.pfc_wd['test_pkt_count'] = 100
+ self.pfc_wd['queue_index'] = 4
+ self.pfc_wd['frames_number'] = 100000000
+ self.pfc_wd['test_port_ids'] = list()
+ self.peer_device = self.ports[port]['peer_device']
+ self.pfc_wd['test_port'] = port
+ self.pfc_wd['rx_port'] = self.ports[port]['rx_port']
+ self.pfc_wd['test_neighbor_addr'] = self.ports[port]['test_neighbor_addr']
+ self.pfc_wd['rx_neighbor_addr'] = self.ports[port]['rx_neighbor_addr']
+ self.pfc_wd['test_port_id'] = self.ports[port]['test_port_id']
+ self.pfc_wd['rx_port_id'] = self.ports[port]['rx_port_id']
+ self.pfc_wd['port_type'] = self.ports[port]['test_port_type']
+ if self.pfc_wd['port_type'] == "portchannel":
+ self.pfc_wd['test_port_ids'] = self.ports[port]['test_portchannel_members']
+ elif self.pfc_wd['port_type'] in ["vlan", "interface"]:
+ self.pfc_wd['test_port_ids'] = self.pfc_wd['test_port_id']
+ self.queue_oid = PfcCmd.get_queue_oid(self.dut, port, self.pfc_wd['queue_index'])
+
+ def update_queue(self, port):
+ """
+ Switch between queue 3 and 4 during the test
+
+ Args:
+ port(string) : DUT port
+ """
+ if self.pfc_wd['queue_index'] == 4:
+ self.pfc_wd['queue_index'] = self.pfc_wd['queue_index'] - 1
+ else:
+ self.pfc_wd['queue_index'] = self.pfc_wd['queue_index'] + 1
+ logger.info("Current queue: {}".format(self.pfc_wd['queue_index']))
+ self.queue_oid = PfcCmd.get_queue_oid(self.dut, port, self.pfc_wd['queue_index'])
+
+ def setup_mmu_params(self, port):
+ """
+ Retrieve the pg profile and alpha values of the port under test
+
+ Args:
+ port(string) : DUT port
+ """
+ self.pg_profile, self.alpha = PfcCmd.get_mmu_params(self.dut, port)
+
+ def update_mmu_params(self, mmu_action):
+ """
+ Update dynamic threshold value
+
+ Args:
+ mmu_action(string): for value "change", update within -6 and 3
+ for value "restore", set back to original threshold
+ """
+ if int(self.alpha) <= -6:
+ new_alpha = -5
+ elif int(self.alpha) >= 3:
+ new_alpha = 2
+ else:
+ new_alpha = int(self.alpha) + 1
+
+ if mmu_action == "change":
+ PfcCmd.update_alpha(self.dut, self.pg_profile, new_alpha)
+ elif mmu_action == "restore":
+ PfcCmd.update_alpha(self.dut, self.pg_profile, self.alpha)
+ time.sleep(2)
def resolve_arp(self, vlan):
"""
@@ -243,6 +323,7 @@ def storm_setup(self, init=False):
pfc_queue_idx=self.pfc_wd['queue_index'],
pfc_frames_number=self.pfc_wd['frames_number'],
peer_info=peer_info)
+ self.storm_hndle.update_queue_index(self.pfc_wd['queue_index'])
self.storm_hndle.update_peer_info(peer_info)
self.storm_hndle.deploy_pfc_gen()
@@ -253,6 +334,7 @@ def storm_setup(self, init=False):
'pfc_fanout_interface': self.neighbors[self.pfc_wd['test_port']]['peerport']
}
+ self.storm_hndle.update_queue_index(self.pfc_wd['queue_index'])
self.storm_hndle.update_peer_info(peer_info)
@@ -337,8 +419,14 @@ def verify_other_pfc_queue(self):
dst_port = "".join(str(self.pfc_wd_test_port_ids)).replace(',', '')
else:
dst_port = "[ " + str(self.pfc_wd_test_port_ids) + " ]"
+
+ if self.pfc_queue_index == 4:
+ other_queue = self.pfc_queue_index - 1
+ else:
+ other_queue = self.pfc_queue_index + 1
+
ptf_params = {'router_mac': self.router_mac,
- 'queue_index': self.pfc_queue_index - 1,
+ 'queue_index': other_queue,
'pkt_count': self.pfc_wd_test_pkt_count,
'port_src': self.pfc_wd_rx_port_id[0],
'port_dst': dst_port,
@@ -359,8 +447,14 @@ def verify_other_pfc_pg(self):
dst_port = "".join(str(self.pfc_wd_rx_port_id)).replace(',', '')
else:
dst_port = "[ " + str(self.pfc_wd_rx_port_id) + " ]"
+
+ if self.pfc_queue_index == 4:
+ other_pg = self.pfc_queue_index - 1
+ else:
+ other_pg = self.pfc_queue_index + 1
+
ptf_params = {'router_mac': self.router_mac,
- 'queue_index': self.pfc_queue_index - 1,
+ 'queue_index': other_pg,
'pkt_count': self.pfc_wd_test_pkt_count,
'port_src': self.pfc_wd_test_port_id,
'port_dst': dst_port,
@@ -482,7 +576,7 @@ def storm_restore_path(self, dut, loganalyzer, port, action):
loganalyzer.analyze(marker)
self.stats.get_pkt_cnts(self.queue_oid, begin=False)
- def run_test(self, dut, port, action):
+ def run_test(self, dut, port, action, mmu_action=None):
"""
Test method that invokes the storm detection and restoration path which includes the traffic
test and associated counter verifications
@@ -494,17 +588,22 @@ def run_test(self, dut, port, action):
"""
logger.info("--- Storm detection path for port {} ---".format(port))
loganalyzer = self.storm_detect_path(dut, port, action)
+
+ if mmu_action is not None:
+ self.update_mmu_params(mmu_action)
+
logger.info("--- Storm restoration path for port {} ---".format(port))
self.storm_restore_path(dut, loganalyzer, port, action)
logger.info("--- Verify PFCwd counters for port {} ---".format(port))
self.stats.verify_pkt_cnts(self.pfc_wd['port_type'], self.pfc_wd['test_pkt_count'])
- def test_pfcwd_actions(self, request, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, fanouthosts):
+ def test_pfcwd_actions(self, request, fake_storm, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, fanouthosts):
"""
PFCwd functional test
Args:
request(object) : pytest request object
+ fake_storm(fixture) : Module scoped fixture for enable/disable fake storm
setup_pfc_test(fixture) : Module scoped autouse fixture for PFCwd
fanout_graph_facts(fixture) : fanout graph info
ptfhost(AnsibleHost) : ptf host instance
@@ -522,10 +621,10 @@ def test_pfcwd_actions(self, request, setup_pfc_test, fanout_graph_facts, ptfhos
self.neighbors = setup_info['neighbors']
dut_facts = self.dut.facts
self.peer_dev_list = dict()
- self.fake_storm = request.config.getoption("--fake-storm")
+ self.fake_storm = fake_storm
+ self.storm_hndle = None
for idx, port in enumerate(self.ports):
- self.storm_hndle = None
logger.info("")
logger.info("--- Testing various Pfcwd actions on {} ---".format(port))
self.setup_test_params(port, setup_info['vlan'], init=not idx)
@@ -550,3 +649,71 @@ def test_pfcwd_actions(self, request, setup_pfc_test, fanout_graph_facts, ptfhos
PfcCmd.set_storm_status(self.dut, self.queue_oid, "disabled")
logger.info("--- Stop PFC WD ---")
self.dut.command("pfcwd stop")
+
+ def test_pfcwd_mmu_change(self, request, fake_storm, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, fanouthosts):
+ """
+ Tests if mmu changes impact Pfcwd functionality
+
+ Test cycles through the following mmu actions (change, noop, restore, noop)
+ 1. Select the lossless queue on 1st iteration. Switch the lossless queue (between 3 and 4) in the remaining iterations
+ 2. Start pfcwd on the selected test port
+ 3. Start pfc storm on selected test port/lossless queue and verify if the storm detected msg is seen in the logs
+ 4. Send traffic with test port/lossless queue as ingress/egress port and ensure that packets are dropped
+ Send traffic with test port/other lossless queue as ingress/egress port and ensure that packets are forwarded
+ 5. Update the dynamic threshold associated with the pg profile attached to the test port if the mmu action is 'change' or 'restore'
+ 6. Stop pfc storm on selected test port/lossless queue and verify if the storm restored msg is seen in the logs
+
+ Args:
+ request(object) : pytest request object
+ fake_storm(fixture) : Module scoped fixture for enable/disable fake storm
+ setup_pfc_test(fixture) : Module scoped autouse fixture for PFCwd
+ fanout_graph_facts(fixture) : fanout graph info
+ ptfhost(AnsibleHost) : ptf host instance
+ duthost(AnsibleHost) : DUT instance
+ rand_one_dut_hostname(string) : randomly pick a dut in multi DUT setup
+ fanouthosts(AnsibleHost): fanout instance
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ setup_info = setup_pfc_test
+ self.fanout_info = fanout_graph_facts
+ self.ptf = ptfhost
+ self.dut = duthost
+ self.fanout = fanouthosts
+ self.timers = setup_info['pfc_timers']
+ self.ports = setup_info['selected_test_ports']
+ key, value = self.ports.items()[0]
+ self.ports = {key: value}
+ port = key
+ self.neighbors = setup_info['neighbors']
+ dut_facts = self.dut.facts
+ self.peer_dev_list = dict()
+ self.fake_storm = fake_storm
+ self.storm_hndle = None
+ logger.info("---- Testing on port {} ----".format(port))
+ self.setup_test_params(port, setup_info['vlan'], init=True, mmu_params=True)
+ self.stats = PfcPktCntrs(self.dut, "drop")
+
+ try:
+ for idx, mmu_action in enumerate(MMU_ACTIONS):
+ self.traffic_inst = SendVerifyTraffic(self.ptf, dut_facts['router_mac'], self.pfc_wd)
+ pfc_wd_restore_time_large = request.config.getoption("--restore-time")
+ # wait time before we check the logs for the 'restore' signature. 'pfc_wd_restore_time_large' is in ms.
+ self.timers['pfc_wd_wait_for_restore_time'] = int(pfc_wd_restore_time_large / 1000 * 2)
+ if idx:
+ self.update_queue(port)
+ self.storm_setup()
+ self.traffic_inst = SendVerifyTraffic(self.ptf, dut_facts['router_mac'], self.pfc_wd)
+ self.run_test(self.dut, port, "drop", mmu_action=mmu_action)
+ self.dut.command("pfcwd stop")
+
+ except Exception as e:
+ pytest.fail(str(e))
+
+ finally:
+ if self.storm_hndle:
+ logger.info("--- Stop pfc storm on port {}".format(port))
+ self.storm_hndle.stop_storm()
+ # restore alpha
+ PfcCmd.update_alpha(self.dut, self.pg_profile, self.alpha)
+ logger.info("--- Stop PFC WD ---")
+ self.dut.command("pfcwd stop")
diff --git a/tests/pfcwd/test_pfcwd_warm_reboot.py b/tests/pfcwd/test_pfcwd_warm_reboot.py
index 6410e171a1a..aa6c15f82fe 100644
--- a/tests/pfcwd/test_pfcwd_warm_reboot.py
+++ b/tests/pfcwd/test_pfcwd_warm_reboot.py
@@ -32,7 +32,7 @@
}
pytestmark = [pytest.mark.disable_loganalyzer,
- pytest.mark.topology('any')
+ pytest.mark.topology('t0')
]
logger = logging.getLogger(__name__)
@@ -430,12 +430,13 @@ def stop_all_storm(self):
logger.info("--- Disabling fake storm on port {} queue {}".format(port, queue))
PfcCmd.set_storm_status(self.dut, self.oid_map[(port, queue)], "disabled")
- def pfcwd_wb_helper(self, request, testcase_actions, setup_pfc_test, fanout_graph_facts, ptfhost,
+ def pfcwd_wb_helper(self, fake_storm, testcase_actions, setup_pfc_test, fanout_graph_facts, ptfhost,
duthost, localhost, fanouthosts):
"""
Helper method that initializes the vars and starts the test execution
Args:
+ fake_storm(bool): if fake storm is enabled or disabled
testcase_actions(list): list of actions that the test will go through
setup_pfc_test(fixture): module scoped autouse fixture
fanout_graph_facts(fixture): fanout info
@@ -460,7 +461,7 @@ def pfcwd_wb_helper(self, request, testcase_actions, setup_pfc_test, fanout_grap
storm_deferred = 0
storm_restored = 0
self.max_wait = 0
- self.fake_storm = request.config.getoption("--fake-storm")
+ self.fake_storm = fake_storm
self.oid_map = dict()
self.storm_threads = []
@@ -519,11 +520,12 @@ def testcase_action(self, request):
"""
yield request.param
- def test_pfcwd_wb(self, request, testcase_action, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, localhost, fanouthosts):
+ def test_pfcwd_wb(self, fake_storm, testcase_action, setup_pfc_test, fanout_graph_facts, ptfhost, duthosts, rand_one_dut_hostname, localhost, fanouthosts):
"""
Tests PFCwd warm reboot with various testcase actions
Args:
+ fake_storm(fixture): fake storm status
testcase_action(fixture): testcase to execute (values: 'no_storm', 'storm', 'async_storm')
'no_storm' : PFCwd storm detection/restore before and after warm reboot
@@ -543,5 +545,5 @@ def test_pfcwd_wb(self, request, testcase_action, setup_pfc_test, fanout_graph_f
"""
duthost = duthosts[rand_one_dut_hostname]
logger.info("--- {} ---".format(TESTCASE_INFO[testcase_action]['desc']))
- self.pfcwd_wb_helper(request, TESTCASE_INFO[testcase_action]['test_sequence'], setup_pfc_test,
+ self.pfcwd_wb_helper(fake_storm, TESTCASE_INFO[testcase_action]['test_sequence'], setup_pfc_test,
fanout_graph_facts, ptfhost, duthost, localhost, fanouthosts)
diff --git a/tests/pipelines/cont-warmboot.yml b/tests/pipelines/cont-warmboot.yml
new file mode 100644
index 00000000000..f6115c049ec
--- /dev/null
+++ b/tests/pipelines/cont-warmboot.yml
@@ -0,0 +1,152 @@
+# Starter pipeline
+
+# Start with a minimal pipeline that you can customize to build and deploy your code.
+
+# Add steps that build, run tests, deploy, and more:
+
+# https://aka.ms/yaml
+
+pr:
+- master
+
+name: $(TeamProject)_$(Build.DefinitionName)_$(SourceBranchName)_$(Date:yyyyMMdd)$(Rev:.r)
+
+resources:
+ repositories:
+ - repository: sonic-mgmt
+ type: github
+ name: Azure/sonic-mgmt
+ endpoint: build
+
+parameters:
+- name: iterations
+ type: object
+ default: [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100]
+
+stages:
+- stage: Test
+ pool: sonictest
+ variables:
+ - name: dut
+ value: vlab-01
+ - name: tbname
+ value: vms-kvm-t0
+ - name: inventory
+ value: veos_vtb
+ - name: testbed_file
+ value: vtestbed.csv
+ - name: ptf_name
+ value: ptf_vms6-1
+ - name: continue
+ value: true
+
+
+ jobs:
+ - job: kvmWarmbootTest
+ displayName: "kvmtest"
+ timeoutInMinutes: 0
+ steps:
+ - task: DownloadPipelineArtifact@2
+
+ inputs:
+ source: specific
+ project: build
+ pipeline: 1
+ artifact: sonic-buildimage.vs
+ runVersion: 'latestFromBranch'
+ runBranch: 'refs/heads/master'
+ displayName: "Download sonic kvm image"
+
+ - script: |
+ sudo mkdir -p /data/sonic-vm/images
+ sudo cp -v ../target/sonic-vs.img.gz /data/sonic-vm/images/sonic-vs.img.gz
+ sudo gzip -fd /data/sonic-vm/images/sonic-vs.img.gz
+ username=$(id -un)
+ sudo chown -R $username.$username /data/sonic-vm
+ pushd /data/sonic-mgmt
+ git remote update
+ git reset --hard origin/master
+ sed -i s/use_own_value/${username}/ ansible/veos_vtb
+ echo aaa > ansible/password.txt
+ docker exec sonic-mgmt bash -c "pushd /data/sonic-mgmt/ansible;./testbed-cli.sh -d /data/sonic-vm -m $(inventory) -t $(testbed_file) -k ceos refresh-dut $(tbname) password.txt; ./testbed-cli.sh -m $(inventory) -t $(testbed_file) deploy-mg $(tbname) lab password.txt" && sleep 180
+ displayName: "Setup T0 testbed"
+
+ - ${{ each iter in parameters.iterations }}:
+ - script: |
+ echo " Continue running test: $(continue)" # outputs secondValue
+ if $(continue); then
+ pwd
+ username=$(id -un)
+ rm -rf $(Build.ArtifactStagingDirectory)/*
+
+ IMAGE_LOCATION="https://sonic-jenkins.westus2.cloudapp.azure.com/job/vs/job/buildimage-vs-image-202012/lastSuccessfulBuild/artifact/target/sonic-vs.img.gz"
+ IMAGE_LIST="current"
+ CONTINUOUS_REBOOT_COUNT=1
+ CONTINUOUS_REBOOT_DELAY=600
+
+ CMD="cd /data/sonic-mgmt/tests/ ; \
+ export ANSIBLE_CONFIG=/data/sonic-mgmt/ansible ANSIBLE_LIBRARY=/data/sonic-mgmt/ansible/library/; \
+ pytest platform_tests/test_cont_warm_reboot.py \
+ --continuous_reboot_count=$CONTINUOUS_REBOOT_COUNT \
+ --continuous_reboot_delay=$CONTINUOUS_REBOOT_DELAY \
+ --testbed=$(tbname) --host-pattern=$(tbname) --inventory=/data/sonic-mgmt/ansible/veos_vtb \
+ --testbed_file=/data/sonic-mgmt/ansible/vtestbed.csv --module-path=/data/sonic-mgmt/ansible/library \
+ --show-capture=no --log-cli-level=INFO \
+ --image_location=$IMAGE_LOCATION --image_list=$IMAGE_LIST \
+ --reboot_type=warm --skip_sanity"
+
+ LOG_CMD="mkdir -p /data/sonic-mgmt/tests/logs/continuous_warm_reboot/ ; mv /data/sonic-mgmt/tests/continous_reboot_* /data/sonic-mgmt/tests/logs/continuous_warm_reboot/ || true"
+ echo "Continuous warm reboot test command: $CMD"
+ iteration=${{ iter }}
+ echo "================ iteration ${iteration} started==================="
+ echo $iteration
+ docker exec sonic-mgmt bash -c "rm -rf /data/sonic-mgmt/tests/logs/continuous_warm_reboot || true"
+ docker exec sonic-mgmt bash -c "$CMD"
+
+ if [ "${PIPESTATUS[0]}" -ne "0" ]; then
+ # fail and stop all the remaining batches if pytest fails
+ echo "Iteration ${iteration} failed. Cancel subsequent tests, and saving KVM memdmp"
+ echo "##vso[task.setvariable variable=continue]false"
+
+ # save dut state if test fails
+ virsh_version=$(virsh --version)
+ if [ $virsh_version == "6.0.0" ]; then
+ mkdir -p $(Build.ArtifactStagingDirectory)/kvmdump
+ virsh -c qemu:///system list
+ virsh -c qemu:///system save $(dut) $(Build.ArtifactStagingDirectory)/kvmdump/$(dut).memdmp
+ virsh -c qemu:///system dumpxml $(dut) > $(Build.ArtifactStagingDirectory)/kvmdump/$(dut).xml
+ img=$(virsh -c qemu:///system domblklist $(dut) | grep vda | awk '{print $2}')
+ cp $img $(Build.ArtifactStagingDirectory)/kvmdump/$(dut).img
+ virsh -c qemu:///system undefine $(dut)
+ fi
+ docker commit $(ptf_name) docker-ptf:$(Build.BuildNumber)
+ docker save docker-ptf:$(Build.BuildNumber) | gzip -c > $(Build.ArtifactStagingDirectory)/kvmdump/docker-ptf-dump.gz
+ docker rmi docker-ptf:$(Build.BuildNumber)
+ fi
+ docker exec sonic-mgmt bash -c "$LOG_CMD"
+ cp -r /data/sonic-mgmt/tests/logs $(Build.ArtifactStagingDirectory)/
+ sudo chown -R $username.$username $(Build.ArtifactStagingDirectory)
+ echo "================ iteration ${iteration} completed==================="
+ else
+ echo "Test execution failure caused skipping of remaining tests"
+ exit 2
+ fi
+ displayName: "Run continuous warmboot tests"
+ continueOnError: false
+
+ - publish: $(Build.ArtifactStagingDirectory)/logs/continuous_warm_reboot
+ artifact: sonic-buildimage.kvmtest.log@${{ iter }}
+ displayName: "Archive sonic kvm logs"
+ continueOnError: true
+
+
+ - publish: $(Build.ArtifactStagingDirectory)/kvmdump
+ artifact: sonic-buildimage.kvmtest.memdump@$(System.JobAttempt)
+ displayName: "Archive sonic kvm memdump"
+ condition: failed()
+
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFiles: '$(Build.ArtifactStagingDirectory)/logs/**/*.xml'
+ testRunTitle: kvmtest
+ condition: succeededOrFailed()
\ No newline at end of file
diff --git a/tests/platform_tests/api/conftest.py b/tests/platform_tests/api/conftest.py
index a95153e5a42..80eb8d3ad75 100644
--- a/tests/platform_tests/api/conftest.py
+++ b/tests/platform_tests/api/conftest.py
@@ -10,7 +10,7 @@
IPTABLES_DELETE_RULE_CMD = 'iptables -D INPUT -p tcp -m tcp --dport {} -j ACCEPT'.format(SERVER_PORT)
@pytest.fixture(scope='function')
-def start_platform_api_service(duthost, localhost):
+def start_platform_api_service(duthost, localhost, request):
dut_ip = duthost.setup()['ansible_facts']['ansible_eth0']['ipv4']['address']
res = localhost.wait_for(host=dut_ip,
@@ -21,8 +21,13 @@ def start_platform_api_service(duthost, localhost):
module_ignore_errors=True)
if 'exception' in res:
# TODO: Remove this check once we no longer need to support Python 2
- res = duthost.command('docker exec -i pmon python3 -c "import sonic_platform"', module_ignore_errors=True)
- py3_platform_api_available = not res['failed']
+ if request.cls.__name__ == "TestSfpApi" and duthost.facts.get("asic_type") == "mellanox":
+ # On Mellanox platform, the SFP APIs are not migrated to python3 yet,
+ # thus we have to make it as an exception here.
+ py3_platform_api_available = False
+ else:
+ res = duthost.command('docker exec -i pmon python3 -c "import sonic_platform"', module_ignore_errors=True)
+ py3_platform_api_available = not res['failed']
supervisor_conf = [
'[program:platform_api_server]',
diff --git a/tests/platform_tests/api/test_fan_drawer.py b/tests/platform_tests/api/test_fan_drawer.py
index b09fd0fbe95..37df7aae024 100644
--- a/tests/platform_tests/api/test_fan_drawer.py
+++ b/tests/platform_tests/api/test_fan_drawer.py
@@ -69,6 +69,8 @@ def compare_value_with_platform_facts(self, key, value, fan_drawer_idx):
expected_fan_drawers = self.chassis_facts.get("fan_drawers")
if expected_fan_drawers:
expected_value = expected_fan_drawers[fan_drawer_idx].get(key)
+ if key == "num_fans" and not expected_value:
+ expected_value = len(expected_fan_drawers[fan_drawer_idx].get("fans"))
if self.expect(expected_value is not None,
"Unable to get expected value for '{}' from platform.json file for fan drawer {}".format(key, fan_drawer_idx)):
diff --git a/tests/platform_tests/api/test_fan_drawer_fans.py b/tests/platform_tests/api/test_fan_drawer_fans.py
index 8dbc421e9c3..985ce7c569b 100644
--- a/tests/platform_tests/api/test_fan_drawer_fans.py
+++ b/tests/platform_tests/api/test_fan_drawer_fans.py
@@ -70,7 +70,7 @@ def compare_value_with_platform_facts(self, key, value, fan_drawer_idx, fan_idx)
expected_value = None
if self.chassis_facts:
- expected_fan_drawers = self.chassis_facts.get("fan_drawer")
+ expected_fan_drawers = self.chassis_facts.get("fan_drawers")
if expected_fan_drawers:
expected_fans = expected_fan_drawers[fan_drawer_idx].get("fans")
if expected_fans:
diff --git a/tests/platform_tests/api/test_sfp.py b/tests/platform_tests/api/test_sfp.py
index 210af612b04..73cbba8ddcf 100644
--- a/tests/platform_tests/api/test_sfp.py
+++ b/tests/platform_tests/api/test_sfp.py
@@ -60,7 +60,20 @@ class TestSfpApi(PlatformApiTestBase):
'cable_length',
'specification_compliance',
'nominal_bit_rate',
- 'application_advertisement'
+ ]
+
+ # These are fields which have been added in the common parsers
+ # in sonic-platform-common/sonic_sfp, but since some vendors are
+ # using their own custom parsers, they do not yet provide these
+ # fields. So we treat them differently. Rather than failing the test
+ # if these fields are not present or 'N/A', we will simply log warnings
+ # until all vendors utilize the common parsers. At that point, we should
+ # add these into EXPECTED_XCVR_INFO_KEYS.
+ NEWLY_ADDED_XCVR_INFO_KEYS = [
+ 'type_abbrv_name',
+ 'application_advertisement',
+ 'is_replaceable',
+ 'dom_capability'
]
EXPECTED_XCVR_BULK_STATUS_KEYS = [
@@ -219,7 +232,14 @@ def test_get_transceiver_info(self, duthost, localhost, platform_api_conn):
for key in missing_keys:
self.expect(False, "Transceiver {} info does not contain field: '{}'".format(i, key))
- unexpected_keys = set(actual_keys) - set(self.EXPECTED_XCVR_INFO_KEYS)
+ # TODO: Remove this once we can include these keys in EXPECTED_XCVR_INFO_KEYS
+ for key in self.NEWLY_ADDED_XCVR_INFO_KEYS:
+ if key not in actual_keys:
+ logger.warning("test_get_transceiver_info: Transceiver {} info missing field '{}'. Vendor needs to add support.".format(i, key))
+ elif info_dict[key] == "N/A":
+ logger.warning("test_get_transceiver_info: Transceiver {} info value for '{}' is 'N/A'. Vendor needs to add support.".format(i, key))
+
+ unexpected_keys = set(actual_keys) - set(self.EXPECTED_XCVR_INFO_KEYS + self.NEWLY_ADDED_XCVR_INFO_KEYS)
for key in unexpected_keys:
self.expect(False, "Transceiver {} info contains unexpected field '{}'".format(i, key))
self.assert_expectations()
diff --git a/tests/platform_tests/api/test_thermal.py b/tests/platform_tests/api/test_thermal.py
index 3c960949512..6215711989d 100644
--- a/tests/platform_tests/api/test_thermal.py
+++ b/tests/platform_tests/api/test_thermal.py
@@ -141,9 +141,7 @@ def test_get_temperature(self, duthost, localhost, platform_api_conn):
temperature = thermal.get_temperature(platform_api_conn, i)
if self.expect(temperature is not None, "Unable to retrieve Thermal {} temperature".format(i)):
- if self.expect(isinstance(temperature, float), "Thermal {} temperature appears incorrect".format(i)):
- self.expect(temperature > 0 and temperature <= 100,
- "Thermal {} temperature {} reading is not within range".format(i, temperature))
+ self.expect(isinstance(temperature, float), "Thermal {} temperature appears incorrect".format(i))
self.assert_expectations()
def test_get_low_threshold(self, duthost, localhost, platform_api_conn):
@@ -152,9 +150,7 @@ def test_get_low_threshold(self, duthost, localhost, platform_api_conn):
low_threshold = thermal.get_low_threshold(platform_api_conn, i)
if self.expect(low_threshold is not None, "Unable to retrieve Thermal {} low threshold".format(i)):
- if self.expect(isinstance(low_threshold, float), "Thermal {} low threshold appears incorrect".format(i)):
- self.expect(low_threshold > 0 and low_threshold <= 100,
- "Thermal {} low threshold {} reading is not within range".format(i, low_threshold))
+ self.expect(isinstance(low_threshold, float), "Thermal {} low threshold appears incorrect".format(i))
self.assert_expectations()
def test_get_high_threshold(self, duthost, localhost, platform_api_conn):
@@ -163,9 +159,7 @@ def test_get_high_threshold(self, duthost, localhost, platform_api_conn):
high_threshold = thermal.get_high_threshold(platform_api_conn, i)
if self.expect(high_threshold is not None, "Unable to retrieve Thermal {} high threshold".format(i)):
- if self.expect(isinstance(high_threshold, float), "Thermal {} high threshold appears incorrect".format(i)):
- self.expect(high_threshold > 0 and high_threshold <= 100,
- "Thermal {} high threshold {} reading is not within range".format(i, high_threshold))
+ self.expect(isinstance(high_threshold, float), "Thermal {} high threshold appears incorrect".format(i))
self.assert_expectations()
def test_get_low_critical_threshold(self, duthost, localhost, platform_api_conn):
@@ -174,9 +168,7 @@ def test_get_low_critical_threshold(self, duthost, localhost, platform_api_conn)
low_critical_threshold = thermal.get_low_critical_threshold(platform_api_conn, i)
if self.expect(low_critical_threshold is not None, "Unable to retrieve Thermal {} low critical threshold".format(i)):
- if self.expect(isinstance(low_critical_threshold, float), "Thermal {} low threshold appears incorrect".format(i)):
- self.expect(low_critical_threshold > 0 and low_critical_threshold <= 110,
- "Thermal {} low critical threshold {} reading is not within range".format(i, low_critical_threshold))
+ self.expect(isinstance(low_critical_threshold, float), "Thermal {} low threshold appears incorrect".format(i))
self.assert_expectations()
def test_get_high_critical_threshold(self, duthost, localhost, platform_api_conn):
@@ -185,9 +177,7 @@ def test_get_high_critical_threshold(self, duthost, localhost, platform_api_conn
high_critical_threshold = thermal.get_high_critical_threshold(platform_api_conn, i)
if self.expect(high_critical_threshold is not None, "Unable to retrieve Thermal {} high critical threshold".format(i)):
- if self.expect(isinstance(high_critical_threshold, float), "Thermal {} high threshold appears incorrect".format(i)):
- self.expect(high_critical_threshold > 0 and high_critical_threshold <= 110,
- "Thermal {} high critical threshold {} reading is not within range".format(i, high_critical_threshold))
+ self.expect(isinstance(high_critical_threshold, float), "Thermal {} high threshold appears incorrect".format(i))
self.assert_expectations()
def test_set_low_threshold(self, duthost, localhost, platform_api_conn):
diff --git a/tests/platform_tests/api/watchdog.yml b/tests/platform_tests/api/watchdog.yml
index 3ea43d51554..ac26dba0c75 100644
--- a/tests/platform_tests/api/watchdog.yml
+++ b/tests/platform_tests/api/watchdog.yml
@@ -51,8 +51,15 @@ x86_64-mlnx_msn2700-r0:
greater_timeout: 100
too_big_timeout: 66000
-x86_64-dell_s6100_c2538-r0:
+# Dell watchdog
+x86_64-dell.*:
default:
greater_timeout: 180
- too_big_timeout: 200
+ too_big_timeout: 660
+# Arista watchdog
+x86_64-arista.*:
+ default:
+ valid_timeout: 10
+ greater_timeout: 100
+ too_big_timeout: 660
diff --git a/tests/platform_tests/args/advanced_reboot_args.py b/tests/platform_tests/args/advanced_reboot_args.py
index 9cf2fdfb0de..7f9d7a739e8 100644
--- a/tests/platform_tests/args/advanced_reboot_args.py
+++ b/tests/platform_tests/args/advanced_reboot_args.py
@@ -91,3 +91,11 @@ def add_advanced_reboot_args(parser):
default=None,
help="Script for checking additional states on DUT"
)
+
+ parser.addoption(
+ "--bgp_v4_v6_time_diff",
+ action="store",
+ type=int,
+ default=40,
+ help="Time difference (in sec) between BGP V4 and V6 establishment time"
+ )
diff --git a/tests/platform_tests/broadcom/files/ser_injector.py b/tests/platform_tests/broadcom/files/ser_injector.py
index bd58a919bed..05d038fcb17 100644
--- a/tests/platform_tests/broadcom/files/ser_injector.py
+++ b/tests/platform_tests/broadcom/files/ser_injector.py
@@ -236,6 +236,64 @@
u'MMU_THDM_MCQE_QUEUE_OFFSET_PIPE0.mmu_xpe0', u'ING_SNAT.ipipe0',
u'MMU_THDM_MCQE_QUEUE_OFFSET_B_PIPE1.mmu_xpe0', u'MMU_THDU_OFFSET_QGROUP_PIPE3.mmu_xpe0',
u'ING_VP_VLAN_MEMBERSHIP.ipipe0', u'MMU_THDU_CONFIG_PORT_PIPE3.mmu_xpe0', u'FP_GLOBAL_MASK_TCAM.ipipe0',
+ ],
+ 'th3' : [
+ # cannot pass
+ u'L3_DEFIP_TCAM_LEVEL1.ipipe0',
+ u'MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE7.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE7.mmu_eb0',
+ u'L3_ENTRY_ONLY_SINGLE.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE6.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE3.mmu_eb0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE0.ipipe0',
+ u'L3_ENTRY_SINGLE.ipipe0',
+ u'L2_ENTRY.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE6.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE0.ipipe0',
+ u'L3_DEFIP_ALPM_LEVEL3.ipipe0',
+ u'L3_ENTRY_DOUBLE.ipipe0',
+ u'L3_TUNNEL_QUAD.ipipe0',
+ u'L3_DEFIP_PAIR_LEVEL1.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE3.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE1.ipipe0',
+ u'L3_ENTRY_ONLY_DOUBLE.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE0.mmu_eb0',
+ u'L3_DEFIP_ALPM_LEVEL2.ipipe0',
+ u'EGR_IP_TUNNEL_IPV6.epipe0',
+ u'EXACT_MATCH_ECC.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE3.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE5.ipipe0',
+ u'L3_DEFIP_ALPM_LEVEL3_SINGLE.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE5.mmu_eb0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE2.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE2.ipipe0',
+ u'L3_ENTRY_QUAD.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE1.ipipe0',
+ u'EGR_IP_TUNNEL_MPLS.epipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE5.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE4.mmu_eb0',
+ u'L2_USER_ENTRY.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE6.mmu_eb0',
+ u'MY_STATION_TCAM.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE4.ipipe0',
+ u'L3_DEFIP_LEVEL1.ipipe0' ,
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE2.mmu_eb0',
+ u'L3_DEFIP_ALPM_LEVEL2_SINGLE.ipipe0',
+ u'L3_TUNNEL_DOUBLE.ipipe0',
+ u'L3_ENTRY_ONLY_QUAD.ipipe0',
+ u'IFP_LOGICAL_TABLE_SELECT_PIPE7.ipipe0',
+ u'MMU_QSCH_L2_WEIGHT_MEM_PIPE1.mmu_eb0',
+ u'MPLS_ENTRY_SINGLE.ipipe0',
+ u'CPU_COS_MAP.ipipe0',
+ u'L3_TUNNEL_SINGLE.ipipe0',
+ u'L3_DEFIP_ALPM_LEVEL2_HIT_ONLY.ipipe0',
+ u'L2_ENTRY_ONLY_SINGLE.ipipe0',
+ u'L3_DEFIP_LEVEL1_HIT_ONLY.ipipe0',
+ u'EXACT_MATCH_LOGICAL_TABLE_SELECT_PIPE4.ipipe0',
+ u'L3_DEFIP_ALPM_LEVEL3_HIT_ONLY.ipipe0'
]
}
@@ -273,6 +331,8 @@ def get_asic_name():
asic = "td2"
elif "Broadcom Limited Device b870" in output:
asic = "td3"
+ elif "Broadcom Limited Device b980" in output:
+ asic = "th3"
return asic
diff --git a/tests/platform_tests/cli/test_show_chassis_module.py b/tests/platform_tests/cli/test_show_chassis_module.py
new file mode 100644
index 00000000000..cbc07706dcd
--- /dev/null
+++ b/tests/platform_tests/cli/test_show_chassis_module.py
@@ -0,0 +1,83 @@
+import logging
+import pytest
+from tests.common.helpers.assertions import pytest_assert
+from util import get_field_range, get_fields, get_skip_mod_list
+
+logger = logging.getLogger('__name__')
+
+pytestmark = [
+ pytest.mark.topology('t2')
+]
+
+CMD_SHOW_CHASSIS_MODULE = "show chassis-module"
+
+
+def parse_chassis_module(output, expected_headers):
+ assert len(output) > 2
+ f_ranges = get_field_range(output[1])
+ headers = get_fields(output[0], f_ranges)
+
+ for header_v in expected_headers:
+ pytest_assert(header_v in headers, "Missing header {}".format(header_v))
+
+ result = {}
+ for a_line in output[2:]:
+ field_val = get_fields(a_line, f_ranges)
+ mod_idx = field_val[0]
+ result[mod_idx] = {}
+ cur_field = 1
+ for a_header in headers[1:]:
+ result[mod_idx][a_header] = field_val[cur_field]
+ cur_field += 1
+
+ return result
+
+
+def test_show_chassis_module_status(duthosts, enum_dut_hostname):
+ cmd = " ".join([CMD_SHOW_CHASSIS_MODULE, "status"])
+ logger.info("verifying output of cli command {}".format(cmd))
+ duthost = duthosts[enum_dut_hostname]
+ exp_headers = ["Name", "Description", "Physical-Slot", "Oper-Status", "Admin-Status"]
+ skip_mod_list = get_skip_mod_list(duthost)
+
+ output = duthost.command(cmd)
+ res = parse_chassis_module(output['stdout_lines'], exp_headers)
+
+ # by default will assume all modules should be shown online except in skip_module_list
+ for mod_idx in res.keys():
+ if mod_idx in skip_mod_list:
+ pytest_assert(res[mod_idx]['Oper-Status'] == 'Empty',
+ "Oper-status for slot {} should be Empty but it is {}".format(
+ mod_idx, res[mod_idx]['Oper-Status']))
+ else:
+ pytest_assert(res[mod_idx]['Oper-Status'] == 'Online',
+ "Oper-status for slot {} should be Online but it is {}".format(
+ mod_idx, res[mod_idx]['Oper-Status']))
+
+
+def test_show_chassis_module_midplane_status(duthosts, enum_dut_hostname, skip_module_list):
+ """
+ @summary: Verify output of `show chassis-module midplane-status`
+ """
+ cmd = " ".join([CMD_SHOW_CHASSIS_MODULE, "midplane-status"])
+ logger.info("verifying output of cli command {}".format(cmd))
+ expected_headers = ["Name", "IP-Address", "Reachability"]
+
+ duthost = duthosts[enum_dut_hostname]
+ output = duthost.command(cmd)
+ res_mid_status = parse_chassis_module(output['stdout_lines'], expected_headers)
+ mod_key= ['line-cards']
+ skip_mod_list = get_skip_mod_list(duthost, mod_key)
+
+ for mod_idx in res_mid_status:
+ mod_mid_status = res_mid_status[mod_idx]['Reachability']
+ if mod_idx in skip_mod_list:
+ pytest_assert(res_mid_status[mod_idx]['Reachability'] == "False",
+ "reachability of line card {} expected false but is {}".format(mod_idx, mod_mid_status))
+ else:
+ pytest_assert(mod_mid_status == "True",
+ "midplane reachability of line card {} expected true but is {}".format(mod_idx,
+ mod_mid_status))
+
+
+
diff --git a/tests/platform_tests/cli/test_show_platform.py b/tests/platform_tests/cli/test_show_platform.py
index f3bcf8395e7..d00e92f3a20 100644
--- a/tests/platform_tests/cli/test_show_platform.py
+++ b/tests/platform_tests/cli/test_show_platform.py
@@ -9,6 +9,7 @@
# TODO: Add tests for `show platform firmware updates`
# TODO: Add tests for `show platform firmware version`
+import json
import logging
import re
@@ -18,6 +19,7 @@
from pkg_resources import parse_version
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.daemon_utils import check_pmon_daemon_status
+from tests.common.utilities import get_inventory_files, get_host_visible_vars
pytestmark = [
pytest.mark.sanity_check(skip_sanity=True),
@@ -30,15 +32,20 @@
THERMAL_CONTROL_TEST_WAIT_TIME = 65
THERMAL_CONTROL_TEST_CHECK_INTERVAL = 5
+@pytest.fixture(scope='module')
+def dut_vars(duthosts, enum_rand_one_per_hwsku_hostname, request):
+ inv_files = get_inventory_files(request)
+ dut_vars = get_host_visible_vars(inv_files, enum_rand_one_per_hwsku_hostname)
+ yield dut_vars
-def test_show_platform_summary(duthosts, rand_one_dut_hostname):
+def test_show_platform_summary(duthosts, enum_rand_one_per_hwsku_hostname, dut_vars):
"""
@summary: Verify output of `show platform summary`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "summary"])
- logging.info("Verifying output of '{}' ...".format(cmd))
+ logging.info("Verifying output of '{}' on '{}'...".format(cmd, duthost.hostname))
summary_output_lines = duthost.command(cmd)["stdout_lines"]
summary_dict = util.parse_colon_speparated_lines(summary_output_lines)
expected_fields = set(["Platform", "HwSKU", "ASIC"])
@@ -46,26 +53,89 @@ def test_show_platform_summary(duthosts, rand_one_dut_hostname):
new_field = set(["ASIC Count"])
missing_fields = expected_fields - actual_fields
- pytest_assert(len(missing_fields) == 0, "Output missing fields: {}".format(repr(missing_fields)))
+ pytest_assert(len(missing_fields) == 0, "Output missing fields: {} on '{}'".format(repr(missing_fields), duthost.hostname))
unexpected_fields = actual_fields - expected_fields
- pytest_assert(((unexpected_fields == new_field) or len(unexpected_fields) == 0), "Unexpected fields in output: {}".format(repr(unexpected_fields)))
+ pytest_assert(((unexpected_fields == new_field) or len(unexpected_fields) == 0),
+ "Unexpected fields in output: {} on '{}'".format(repr(unexpected_fields), duthost.hostname))
- # TODO: Test values against platform-specific expected data instead of testing for missing values
+ # Testing for missing values
for key in expected_fields:
- pytest_assert(summary_dict[key], "Missing value for '{}'".format(key))
-
-
-def test_show_platform_syseeprom(duthosts, rand_one_dut_hostname):
+ pytest_assert(summary_dict[key], "Missing value for '{}' on '{}'".format(key, duthost.hostname))
+
+    # Testing values against values defined in the inventory if present in the inventory.
+ # hwsku based on 'hwsku' or 'sonic_hwsku' inventory variable.
+ # platform based on 'sonic_hw_platform' inventory variable.
+ # asic based on 'asic_type' inventory variable.
+    # num_asic based on 'num_asics' inventory variable
+ expected_hwsku = dut_vars['hwsku'] if 'hwsku' in dut_vars else None
+ if not expected_hwsku:
+ # Lets try 'sonic_hwsku' as well
+ expected_hwsku = dut_vars['sonic_hwsku'] if 'sonic_hwsku' in dut_vars else None
+ expected_platform = dut_vars['sonic_hw_platform'] if 'sonic_hw_platform' in dut_vars else None
+ expected_asic = dut_vars['asic_type'] if 'asic_type' in dut_vars else None
+ expected_num_asic = str(dut_vars['num_asics']) if 'num_asics' in dut_vars else None
+
+ expected_fields_values = {expected_platform, expected_hwsku, expected_asic}
+ if len(unexpected_fields) != 0:
+ expected_fields_values.add(expected_num_asic)
+
+ actual_fields_values = set(summary_dict.values())
+ diff_fields_values = expected_fields_values.difference(actual_fields_values)
+ pytest_assert((len(diff_fields_values) == 0 or (len(diff_fields_values) == 1 and diff_fields_values.pop() is None)),
+ "Unexpected value of fields, actual={}, expected={} on host '{}'".format(actual_fields_values, expected_fields_values, duthost.hostname))
+
+
+def test_show_platform_syseeprom(duthosts, enum_rand_one_per_hwsku_hostname, dut_vars):
"""
@summary: Verify output of `show platform syseeprom`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "syseeprom"])
- logging.info("Verifying output of '{}' ...".format(cmd))
- syseeprom_output = duthost.command(cmd)["stdout"]
- # TODO: Gather expected data from a platform-specific data file instead of this method
+ logging.info("Verifying output of '{}' on '{}' ...".format(cmd, duthost.hostname))
+ syseeprom_cmd = duthost.command(cmd)
+ syseeprom_output = syseeprom_cmd["stdout"]
+ syseeprom_output_lines = syseeprom_cmd["stdout_lines"]
+
+ """
+    Gather expected data from an inventory file if 'syseeprom_info' is defined in the inventory
+ # Sample inventory with syseeprom:
+
+ str-msn2700-01:
+ ansible_host: 10.251.0.188
+ model: MSN2700-CS2FO
+ serial: MT1234X56789
+ base_mac: 24:8a:07:12:34:56
+ syseeprom_info:
+ "0x21": "MSN2700"
+ "0x22": "MSN2700-CS2FO"
+ "0x23": "MT1234X56789"
+ "0x24": "24:8a:07:12:34:56"
+ "0x25": "12/07/2016"
+ "0x26": "0"
+ "0x28": "x86_64-mlnx_x86-r0"
+ "0x29": "2016.11-5.1.0008-9600"
+ "0x2A": "128"
+ "0x2B": "Mellanox"
+ "0xFE": "0xFBA1E964"
+ """
+ if 'syseeprom_info' in dut_vars:
+ expected_syseeprom_info_dict = dut_vars['syseeprom_info']
+
+ parsed_syseeprom = {}
+        # Can't use util.get_fields as the values go beyond the last set of '---' in the header line.
+ regex_int = re.compile(r'([\S\s]+)(0x[A-F0-9]+)\s+([\d]+)\s+([\S\s]*)')
+ for line in syseeprom_output_lines[6:]:
+ t1 = regex_int.match(line)
+ if t1:
+ parsed_syseeprom[t1.group(2).strip()] = t1.group(4).strip()
+
+ for field in expected_syseeprom_info_dict:
+ pytest_assert(field in parsed_syseeprom, "Expected field '{}' not present in syseeprom on '{}'".format(field, duthost.hostname))
+ pytest_assert(parsed_syseeprom[field] == expected_syseeprom_info_dict[field],
+ "System EEPROM info is incorrect - for '{}', rcvd '{}', expected '{}' on '{}'".
+ format(field, parsed_syseeprom[field], expected_syseeprom_info_dict[field], duthost.hostname))
if duthost.facts["asic_type"] in ["mellanox"]:
expected_fields = [
@@ -82,35 +152,72 @@ def test_show_platform_syseeprom(duthosts, rand_one_dut_hostname):
"CRC-32"]
utility_cmd = "sudo python -c \"import imp; \
- m = imp.load_source('eeprom', '/usr/share/sonic/device/%s/plugins/eeprom.py'); \
- t = m.board('board', '', '', ''); e = t.read_eeprom(); t.decode_eeprom(e)\"" % duthost.facts["platform"]
+ m = imp.load_source('eeprom', '/usr/share/sonic/device/{}/plugins/eeprom.py'); \
+ t = m.board('board', '', '', ''); e = t.read_eeprom(); t.decode_eeprom(e)\"".format(duthost.facts["platform"])
utility_cmd_output = duthost.command(utility_cmd)
for field in expected_fields:
- pytest_assert(syseeprom_output.find(field) >= 0, "Expected field '{}' was not found".format(field))
- pytest_assert(utility_cmd_output["stdout"].find(field) >= 0, "Expected field '{}' was not found".format(field))
+ pytest_assert(syseeprom_output.find(field) >= 0, "Expected field '{}' was not found on '{}'".format(field, duthost.hostname))
+ pytest_assert(utility_cmd_output["stdout"].find(field) >= 0, "Expected field '{}' was not found on '{}'".format(field, duthost.hostname))
for line in utility_cmd_output["stdout_lines"]:
- pytest_assert(line in syseeprom_output, "Line '{}' was not found in output".format(line))
-
+ pytest_assert(line in syseeprom_output, "Line '{}' was not found in output on '{}'".format(line, duthost.hostname))
-def test_show_platform_psustatus(duthosts, rand_one_dut_hostname):
+def test_show_platform_psustatus(duthosts, enum_supervisor_dut_hostname):
"""
@summary: Verify output of `show platform psustatus`
"""
+ duthost = duthosts[enum_supervisor_dut_hostname]
+ logging.info("Check pmon daemon status on dut '{}'".format(duthost.hostname))
+ assert check_pmon_daemon_status(duthost), "Not all pmon daemons running on '{}'".format(duthost.hostname)
+ cmd = " ".join([CMD_SHOW_PLATFORM, "psustatus"])
+
+ logging.info("Verifying output of '{}' on '{}' ...".format(cmd, duthost.hostname))
+ psu_status_output_lines = duthost.command(cmd)["stdout_lines"]
+
+ if "201811" in duthost.os_version or "201911" in duthost.os_version:
+ psu_line_pattern = re.compile(r"PSU\s+\d+\s+(OK|NOT OK|NOT PRESENT)")
+ else:
+ psu_line_pattern = re.compile(r"PSU\s+\d+\s+\w+\s+\w+\s+\w+\s+\w+\s+\w+\s+(OK|NOT OK|NOT PRESENT)\s+(green|amber|red|off)")
+
+ # Check that all PSUs are showing valid status and also at least one PSU is OK
+ num_psu_ok = 0
+
+ for line in psu_status_output_lines[2:]:
+ psu_match = psu_line_pattern.match(line)
+ pytest_assert(psu_match, "Unexpected PSU status output: '{}' on '{}'".format(line, duthost.hostname))
+ psu_status = psu_match.group(1)
+ if psu_status == "OK":
+ num_psu_ok += 1
+
+ pytest_assert(num_psu_ok > 0, "No PSUs are displayed with OK status on '{}'".format(duthost.hostname))
+
+
+def test_show_platform_psustatus_json(duthosts, rand_one_dut_hostname):
+ """
+ @summary: Verify output of `show platform psustatus --json`
+ """
duthost = duthosts[rand_one_dut_hostname]
+
+ if "201811" in duthost.os_version or "201911" in duthost.os_version:
+ pytest.skip("JSON output not available in this version")
+
logging.info("Check pmon daemon status")
- assert check_pmon_daemon_status(duthost), "Not all pmon daemons running."
+ pytest_assert(check_pmon_daemon_status(duthost), "Not all pmon daemons running.")
- cmd = " ".join([CMD_SHOW_PLATFORM, "psustatus"])
+ cmd = " ".join([CMD_SHOW_PLATFORM, "psustatus", "--json"])
logging.info("Verifying output of '{}' ...".format(cmd))
- psu_status_output_lines = duthost.command(cmd)["stdout_lines"]
- psu_line_pattern = re.compile(r"PSU\s+\d+\s+(OK|NOT OK|NOT PRESENT)")
- for line in psu_status_output_lines[2:]:
- pytest_assert(psu_line_pattern.match(line), "Unexpected PSU status output: '{}'".format(line))
- # TODO: Compare against expected platform-specific output
+ psu_status_output = duthost.command(cmd)["stdout"]
+ psu_info_list = json.loads(psu_status_output)
+
+ # TODO: Compare against expected platform-specific output
+ for psu_info in psu_info_list:
+ expected_keys = ["index", "name", "presence", "status", "led_status", "model", "serial", "voltage", "current", "power"]
+ pytest_assert(all(key in psu_info for key in expected_keys), "Expected key(s) missing from JSON output: '{}'".format(psu_status_output))
+ pytest_assert(psu_info["status"] in ["OK", "NOT OK", "NOT PRESENT"], "Unexpected PSU status value: '{}'".format(psu_info["status"]))
+ pytest_assert(psu_info["led_status"] in ["green", "amber", "red", "off"], "Unexpected PSU led_status value: '{}'".format(psu_info["led_status"]))
def verify_show_platform_fan_output(duthost, raw_output_lines):
@@ -123,108 +230,123 @@ def verify_show_platform_fan_output(duthost, raw_output_lines):
NUM_EXPECTED_COLS = 8
else:
NUM_EXPECTED_COLS = 6
-
- pytest_assert(len(raw_output_lines) > 0, "There must be at least one line of output")
+ fans = {}
+ pytest_assert(len(raw_output_lines) > 0, "There must be at least one line of output on '{}'".format(duthost.hostname))
if len(raw_output_lines) == 1:
- pytest_assert(raw_output_lines[0].encode('utf-8').strip() == "Fan Not detected", "Unexpected fan status output")
+ pytest_assert(raw_output_lines[0].encode('utf-8').strip() == "Fan Not detected", "Unexpected fan status output on '{}'".format(duthost.hostname))
else:
- pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output if any fan is detected")
+ pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output if any fan is detected on '{}'".format(duthost.hostname))
second_line = raw_output_lines[1]
field_ranges = util.get_field_range(second_line)
- pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns".format(NUM_EXPECTED_COLS))
+ field_names = util.get_fields(raw_output_lines[0], field_ranges)
+ pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns on '{}'".format(NUM_EXPECTED_COLS, duthost.hostname))
+ fan_num = 0
+ for line in raw_output_lines[2:]:
+ field_values = util.get_fields(line, field_ranges)
+ fans['fan' + str(fan_num)] = {}
+ for field_index, a_field in enumerate(field_names):
+ fans['fan' + str(fan_num)][a_field] = field_values[field_index]
+ fan_num += 1
-def test_show_platform_fan(duthosts, rand_one_dut_hostname):
+ return fans
+
+def test_show_platform_fan(duthosts, enum_supervisor_dut_hostname):
"""
@summary: Verify output of `show platform fan`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_supervisor_dut_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "fan"])
- logging.info("Verifying output of '{}' ...".format(cmd))
+ logging.info("Verifying output of '{}' on '{}' ...".format(cmd, duthost.hostname))
fan_status_output_lines = duthost.command(cmd)["stdout_lines"]
- verify_show_platform_fan_output(duthost, fan_status_output_lines)
+ fans = verify_show_platform_fan_output(duthost, fan_status_output_lines)
- # TODO: Test values against platform-specific expected data
+    # Check that all fans are showing valid status and also at least one fan is OK.
+ num_fan_ok = 0
+ for a_fan in fans.values():
+ if a_fan['Status'] == "OK":
+ num_fan_ok += 1
+ pytest_assert(num_fan_ok > 0, " No Fans are displayed with OK status on '{}'".format(duthost.hostname))
-def verify_show_platform_temperature_output(raw_output_lines):
+def verify_show_platform_temperature_output(raw_output_lines, hostname):
"""
@summary: Verify output of `show platform temperature`. Expected output is
"Thermal Not detected" or a table of thermal status data with 8 columns.
"""
NUM_EXPECTED_COLS = 8
- pytest_assert(len(raw_output_lines) > 0, "There must be at least one line of output")
+ pytest_assert(len(raw_output_lines) > 0, "There must be at least one line of output on '{}'".format(hostname))
if len(raw_output_lines) == 1:
- pytest_assert(raw_output_lines[0].encode('utf-8').strip() == "Thermal Not detected", "Unexpected thermal status output")
+ pytest_assert(raw_output_lines[0].encode('utf-8').strip() == "Thermal Not detected", "Unexpected thermal status output on '{}'".format(hostname))
else:
- pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output if any thermal is detected")
+ pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output if any thermal is detected on '{}'".format(hostname))
second_line = raw_output_lines[1]
field_ranges = util.get_field_range(second_line)
- pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns".format(NUM_EXPECTED_COLS))
+ pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns on '{}'".format(NUM_EXPECTED_COLS, hostname))
-def test_show_platform_temperature(duthosts, rand_one_dut_hostname):
+def test_show_platform_temperature(duthosts, enum_rand_one_per_hwsku_hostname):
"""
@summary: Verify output of `show platform temperature`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "temperature"])
- logging.info("Verifying output of '{}' ...".format(cmd))
+ logging.info("Verifying output of '{}' on '{}'...".format(cmd, duthost.hostname))
temperature_output_lines = duthost.command(cmd)["stdout_lines"]
- verify_show_platform_temperature_output(temperature_output_lines)
+ verify_show_platform_temperature_output(temperature_output_lines, duthost.hostname)
# TODO: Test values against platform-specific expected data
-def test_show_platform_ssdhealth(duthosts, rand_one_dut_hostname):
+def test_show_platform_ssdhealth(duthosts, enum_supervisor_dut_hostname):
"""
@summary: Verify output of `show platform ssdhealth`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_supervisor_dut_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "ssdhealth"])
- logging.info("Verifying output of '{}' ...".format(cmd))
+ logging.info("Verifying output of '{}' on ''{}'...".format(cmd, duthost.hostname))
ssdhealth_output_lines = duthost.command(cmd)["stdout_lines"]
ssdhealth_dict = util.parse_colon_speparated_lines(ssdhealth_output_lines)
expected_fields = set(["Device Model", "Health", "Temperature"])
actual_fields = set(ssdhealth_dict.keys())
missing_fields = expected_fields - actual_fields
- pytest_assert(len(missing_fields) == 0, "Output missing fields: {}".format(repr(missing_fields)))
+ pytest_assert(len(missing_fields) == 0, "Output missing fields: {} on '{}'".format(repr(missing_fields), duthost.hostname))
unexpected_fields = actual_fields - expected_fields
- pytest_assert(len(unexpected_fields) == 0, "Unexpected fields in output: {}".format(repr(unexpected_fields)))
+ pytest_assert(len(unexpected_fields) == 0, "Unexpected fields in output: {} on '{}'".format(repr(unexpected_fields), duthost.hostname))
# TODO: Test values against platform-specific expected data instead of testing for missing values
for key in expected_fields:
- pytest_assert(ssdhealth_dict[key], "Missing value for '{}'".format(key))
+ pytest_assert(ssdhealth_dict[key], "Missing value for '{}' on '{}'".format(key, duthost.hostname))
-def verify_show_platform_firmware_status_output(raw_output_lines):
+def verify_show_platform_firmware_status_output(raw_output_lines, hostname):
"""
@summary: Verify output of `show platform firmware status`. Expected output is
a table of firmware data conaining 5 columns.
"""
NUM_EXPECTED_COLS = 5
- pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output")
+ pytest_assert(len(raw_output_lines) > 2, "There must be at least two lines of output on '{}'".format(hostname))
second_line = raw_output_lines[1]
field_ranges = util.get_field_range(second_line)
- pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns".format(NUM_EXPECTED_COLS))
+ pytest_assert(len(field_ranges) == NUM_EXPECTED_COLS, "Output should consist of {} columns on '{}'".format(NUM_EXPECTED_COLS, hostname))
-def test_show_platform_firmware_status(duthosts, rand_one_dut_hostname):
+def test_show_platform_firmware_status(duthosts, enum_rand_one_per_hwsku_hostname):
"""
@summary: Verify output of `show platform firmware status`
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_hostname]
cmd = " ".join([CMD_SHOW_PLATFORM, "firmware", "status"])
- logging.info("Verifying output of '{}' ...".format(cmd))
+ logging.info("Verifying output of '{}' on '{}' ...".format(cmd, duthost.hostname))
firmware_output_lines = duthost.command(cmd)["stdout_lines"]
- verify_show_platform_firmware_status_output(firmware_output_lines)
+ verify_show_platform_firmware_status_output(firmware_output_lines, duthost.hostname)
# TODO: Test values against platform-specific expected data
diff --git a/tests/platform_tests/cli/util.py b/tests/platform_tests/cli/util.py
index 44210d91e5c..dc7165004ad 100644
--- a/tests/platform_tests/cli/util.py
+++ b/tests/platform_tests/cli/util.py
@@ -62,3 +62,38 @@ def get_fields(line, field_ranges):
fields.append(field.strip())
return fields
+
+
+def get_skip_mod_list(duthost, mod_key=None):
+ """
+ @summary: utility function returns list of modules / peripherals absent in chassis
+ by default if no keyword passed it will return all from inventory file
+              provides a list under skip_modules: in the inventory file for each dut
+              returns an empty list if skip_modules is not defined under the host in the inventory
+ inventory example:
+ DUTHOST:
+ skip_modules:
+ 'line-cards':
+ - LINE-CARD0
+ - LINE-CARD2
+ 'fabric-cards':
+ - FABRIC-CARD3
+ 'psus':
+ - PSU4
+ - PSU5
+ @return a list of modules/peripherals to be skipped in check for platform test
+ """
+
+ skip_mod_list = []
+ dut_vars = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars
+ if 'skip_modules' in dut_vars:
+ if mod_key is None:
+ for mod_type in dut_vars['skip_modules'].keys():
+ for mod_id in dut_vars['skip_modules'][mod_type]:
+ skip_mod_list.append(mod_id)
+ else:
+ for mod_type in mod_key:
+ if mod_type in dut_vars['skip_modules'].keys():
+ for mod_id in dut_vars['skip_modules'][mod_type]:
+ skip_mod_list.append(mod_id)
+ return skip_mod_list
diff --git a/tests/platform_tests/mellanox/check_sysfs.py b/tests/platform_tests/mellanox/check_sysfs.py
index 5b994678e6a..f447e50c0d1 100644
--- a/tests/platform_tests/mellanox/check_sysfs.py
+++ b/tests/platform_tests/mellanox/check_sysfs.py
@@ -7,7 +7,7 @@
from tests.common.mellanox_data import get_platform_data
from tests.common.utilities import wait_until
-MAX_FAN_SPEED_THRESHOLD = 0.1
+MAX_FAN_SPEED_THRESHOLD = 0.15
def check_sysfs(dut):
diff --git a/tests/platform_tests/mellanox/test_thermal_control.py b/tests/platform_tests/mellanox/test_thermal_control.py
index 7f69f849eb7..ad7140c8932 100644
--- a/tests/platform_tests/mellanox/test_thermal_control.py
+++ b/tests/platform_tests/mellanox/test_thermal_control.py
@@ -2,7 +2,6 @@
import operator
import pytest
import random
-import time
from tests.common.mellanox_data import get_platform_data
from tests.common.utilities import wait_until
from tests.platform_tests.thermal_control_test_helper import *
@@ -15,6 +14,8 @@
pytest.mark.topology('any')
]
+logger = logging.getLogger(__name__)
+
THERMAL_CONTROL_TEST_WAIT_TIME = 75
THERMAL_CONTROL_TEST_CHECK_INTERVAL = 5
@@ -40,20 +41,29 @@ def test_dynamic_minimum_table(duthosts, rand_one_dut_hostname, mocker_factory):
temperature = random.randint(0, max_temperature)
trust_state = True if random.randint(0, 1) else False
- logging.info('Testing with temperature={}, trust_state={}'.format(temperature, trust_state))
+ logger.info('Testing with temperature={}, trust_state={}'.format(temperature, trust_state))
expect_minimum_cooling_level = mocker.get_expect_cooling_level(temperature, trust_state)
+ logger.info('Expect minimum cooling level is {}'.format(expect_minimum_cooling_level))
mocker.mock_min_table(temperature, trust_state)
- time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
- actual_cooling_level = get_cooling_cur_state(duthost)
- assert actual_cooling_level >= expect_minimum_cooling_level, 'Cooling level {} is less than minimum allowed {}'.format(actual_cooling_level, expect_minimum_cooling_level)
+ assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME,
+ THERMAL_CONTROL_TEST_CHECK_INTERVAL,
+ check_cooling_level_larger_than_minimum,
+ duthost,
+ expect_minimum_cooling_level), \
+ 'Cooling level is less than minimum allowed {}'.format(expect_minimum_cooling_level)
temperature = random.randint(0, max_temperature)
- logging.info('Testing with temperature={}, trust_state={}'.format(temperature, not trust_state))
+ logger.info('Testing with temperature={}, trust_state={}'.format(temperature, not trust_state))
expect_minimum_cooling_level = mocker.get_expect_cooling_level(temperature, not trust_state)
+ logger.info('Expect minimum cooling level is {}'.format(expect_minimum_cooling_level))
mocker.mock_min_table(temperature, not trust_state)
- time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
- actual_cooling_level = get_cooling_cur_state(duthost)
- assert actual_cooling_level >= expect_minimum_cooling_level, 'Cooling level {} is less than minimum allowed {}'.format(actual_cooling_level, expect_minimum_cooling_level)
+ assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME,
+ THERMAL_CONTROL_TEST_CHECK_INTERVAL,
+ check_cooling_level_larger_than_minimum,
+ duthost,
+ expect_minimum_cooling_level), \
+ 'Cooling level is less than minimum allowed {}'.format(expect_minimum_cooling_level)
+
@pytest.mark.disable_loganalyzer
@@ -65,76 +75,69 @@ def test_set_psu_fan_speed(duthosts, rand_one_dut_hostname, mocker_factory):
if not hot_swappable:
pytest.skip('The platform {} does not support this test case.'.format(duthost.facts["platform"]))
- logging.info('Create mocker, it may take a few seconds...')
+ psu_max_speed = get_psu_max_speed(duthost)
+ logger.info('Create mocker, it may take a few seconds...')
single_fan_mocker = mocker_factory(duthost, 'SingleFanMocker')
- logging.info('Mock FAN absence...')
+ logger.info('Mock FAN absence...')
single_fan_mocker.mock_absence()
- assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, MAX_COOLING_LEVEL, operator.eq), \
- 'Current cooling state is {}'.format(get_cooling_cur_state(duthost))
-
- logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_WAIT_TIME))
- time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME)
- psu_max_speed = get_psu_max_speed(duthost)
- logging.info('Max PSU fan speed is {}'.format(psu_max_speed))
- for index in range(psu_num):
- speed = get_psu_speed(duthost, index)
- logging.info('Speed for PSU {} fan is {}'.format(index, speed))
- _check_psu_fan_speed_in_range(speed, psu_max_speed, MAX_COOLING_LEVEL)
-
- logging.info('Mock FAN presence...')
+ assert wait_until(THERMAL_CONTROL_TEST_WAIT_TIME * 2,
+ THERMAL_CONTROL_TEST_CHECK_INTERVAL,
+ check_psu_fan_speed,
+ duthost,
+ psu_num,
+ psu_max_speed,
+ operator.eq), 'Wait for PSU fan speed change to full speed failed'
+
+ logger.info('Mock FAN presence...')
single_fan_mocker.mock_presence()
- wait_until(THERMAL_CONTROL_TEST_WAIT_TIME, THERMAL_CONTROL_TEST_CHECK_INTERVAL, check_cooling_cur_state, duthost, MAX_COOLING_LEVEL, operator.ne)
- logging.info('Wait {} seconds for the policy to take effect...'.format(THERMAL_CONTROL_TEST_WAIT_TIME * 2))
- # We have to wait THERMAL_CONTROL_TEST_WAIT_TIME * 2 seconds long here because:
- # Usually we only need wait THERMAL_CONTROL_TEST_WAIT_TIME seconds here to make sure thermal
- # control daemon change the cooling level to proper value, However,
- # there is chance that kernel might change cooling state back to MAX_COOLING_LEVEL after
- # user space thermal control adjust it to dynamic minimum value. So we have to wait longer for the
- # user space thermal control to set fan speed to dynamic minimum value again. It
- # means that we might need wait up to 2 thermal loops here.
- time.sleep(THERMAL_CONTROL_TEST_WAIT_TIME * 2)
- cooling_cur_state = get_cooling_cur_state(duthost)
- if cooling_cur_state == MAX_COOLING_LEVEL:
- cmd_output = str(duthost.command('show platform temperature')['stdout_lines'])
- cmd_output = cmd_output.replace("u'", "").replace(',', " ")
- cmd_output = re.split(r' +',cmd_output)
- cmd_output.pop(0)
- j = 0
- table = []
- while j != len(cmd_output):
- entry = []
- for i in range(8):
- entry.append(cmd_output[j + i])
- table.append(entry)
- j += 8
- pytest.skip('Cooling level is still 10, ignore the rest test.\nIt might happen because the asic temperature is still high.\nCurrent system temperature:\n{}'.format(tabulate(table)))
- logging.info('Cooling level changed to {}'.format(cooling_cur_state))
- if cooling_cur_state < 6: # PSU fan speed will never be less than 60%
- cooling_cur_state = 6
- for index in range(psu_num):
- speed = get_psu_speed(duthost, index)
- logging.info('Speed for PSU {} fan is {}'.format(index, speed))
- _check_psu_fan_speed_in_range(speed, psu_max_speed, cooling_cur_state)
+ wait_result = wait_until(THERMAL_CONTROL_TEST_WAIT_TIME * 2,
+ THERMAL_CONTROL_TEST_CHECK_INTERVAL,
+ check_psu_fan_speed,
+ duthost,
+ psu_num,
+ psu_max_speed,
+ operator.ne)
+
+ if not wait_result:
+ cooling_cur_state = get_cooling_cur_state(duthost)
+ if cooling_cur_state == MAX_COOLING_LEVEL:
+ cmd_output = str(duthost.command('show platform temperature')['stdout_lines'])
+ cmd_output = cmd_output.replace("u'", "").replace(',', " ")
+ cmd_output = re.split(r' +',cmd_output)
+ cmd_output.pop(0)
+ j = 0
+ table = []
+ while j != len(cmd_output):
+ entry = []
+ for i in range(8):
+ entry.append(cmd_output[j + i])
+ table.append(entry)
+ j += 8
+ pytest.skip('Cooling level is still 10, ignore the rest test.\nIt might happen because the asic temperature is still high.\nCurrent system temperature:\n{}'.format(tabulate(table)))
+ else:
+ assert False, 'Wait for PSU fan speed change to normal failed'
def _check_psu_fan_speed_in_range(actual_speed, max_speed, cooling_level):
expect_speed = max_speed * cooling_level / 10.0
+ logger.info('Expect speed: {}, actual speed: {}'.format(expect_speed, actual_speed))
if expect_speed > actual_speed:
- assert actual_speed > expect_speed * (1 - PSU_SPEED_TOLERANCE)
+ return actual_speed > expect_speed * (1 - PSU_SPEED_TOLERANCE)
elif expect_speed < actual_speed:
- assert actual_speed < expect_speed * (1 + PSU_SPEED_TOLERANCE)
+ return actual_speed < expect_speed * (1 + PSU_SPEED_TOLERANCE)
def get_psu_speed(dut, index):
index = index + 1
psu_speed_path = PSU_SPEED_PATH.format(index)
file_stat = dut.stat(path=psu_speed_path)
- if not file_stat["stat"]["exists"]:
- return None
+ assert file_stat["stat"]["exists"], 'Failed to get PSU speed file due to {} does not exist'.format(psu_speed_path)
cmd_output = dut.command('cat {}'.format(psu_speed_path))
try:
- return int(cmd_output['stdout'])
+ speed = int(cmd_output['stdout'])
+ logger.info('Speed for PSU {} fan is {}'.format(index, speed))
+ return speed
except Exception as e:
assert False, 'Bad content in {} - {}'.format(psu_speed_path, e)
@@ -142,7 +145,9 @@ def get_psu_speed(dut, index):
def get_psu_max_speed(dut):
cmd_output = dut.command('cat {}'.format(PSU_MAX_SPEED_PATH))
try:
- return int(cmd_output['stdout'])
+ psu_max_speed = int(cmd_output['stdout'])
+ logger.info('Max PSU fan speed is {}'.format(psu_max_speed))
+ return psu_max_speed
except Exception as e:
assert False, 'Bad content in {} - {}'.format(PSU_MAX_SPEED_PATH, e)
@@ -150,11 +155,41 @@ def get_psu_max_speed(dut):
def get_cooling_cur_state(dut):
cmd_output = dut.command('cat {}'.format(COOLING_CUR_STATE_PATH))
try:
- return int(cmd_output['stdout'])
+ cooling_cur_state = int(cmd_output['stdout'])
+ logger.info('Cooling level is {}'.format(cooling_cur_state))
+ return cooling_cur_state
except Exception as e:
assert False, 'Bad content in {} - {}'.format(COOLING_CUR_STATE_PATH, e)
-def check_cooling_cur_state(dut, expect_value, op):
- actual_value = get_cooling_cur_state(dut)
- return op(actual_value, expect_value)
+def check_psu_fan_speed(duthost, psu_num, psu_max_speed, op):
+ """Check if PSU fan speed is in the expect range.
+
+ Args:
+ duthost: DUT host object
+ psu_num: PSU number
+ psu_max_speed: PSU max fan speed
+ op: operator eq or ne which is used to compare actual cooling level with MAX_COOLING_LEVEL
+
+ Returns:
+ [boolean]: True if all PSU fans speed are in a expected range
+ """
+ cooling_cur_state = get_cooling_cur_state(duthost)
+ if not op(cooling_cur_state, MAX_COOLING_LEVEL):
+ return False
+
+ # PSU fan speed will never be less than 60%
+ if cooling_cur_state < 6:
+ cooling_cur_state = 6
+
+ for index in range(psu_num):
+ speed = get_psu_speed(duthost, index)
+ if not _check_psu_fan_speed_in_range(speed, psu_max_speed, cooling_cur_state):
+ return False
+
+ return True
+
+
+def check_cooling_level_larger_than_minimum(duthost, expect_minimum_cooling_level):
+ actual_cooling_level = get_cooling_cur_state(duthost)
+ return actual_cooling_level >= expect_minimum_cooling_level
diff --git a/tests/platform_tests/sfp/__init__.py b/tests/platform_tests/sfp/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/platform_tests/sfp/conftest.py b/tests/platform_tests/sfp/conftest.py
new file mode 100644
index 00000000000..48bf96895d3
--- /dev/null
+++ b/tests/platform_tests/sfp/conftest.py
@@ -0,0 +1,26 @@
+import pytest
+import logging
+import os
+from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
+
+ans_host = None
+
+
+def teardown_module():
+ logging.info("remove script to retrieve port mapping")
+ file_path = os.path.join('/usr/share/sonic/device', ans_host.facts['platform'], 'plugins/getportmap.py')
+ ans_host.file(path=file_path, state='absent')
+
+
+@pytest.fixture(autouse=True)
+def disable_analyzer_for_mellanox(duthost):
+ if duthost.facts["asic_type"] in ["mellanox"]:
+ loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='sfp_cfg')
+ loganalyzer.load_common_config()
+
+ loganalyzer.ignore_regex.append("kernel.*Eeprom query failed*")
+ marker = loganalyzer.init()
+ yield
+
+ if duthost.facts["asic_type"] in ["mellanox"]:
+ loganalyzer.analyze(marker)
diff --git a/tests/platform_tests/sfp/test_sfpshow.py b/tests/platform_tests/sfp/test_sfpshow.py
new file mode 100644
index 00000000000..dc9369095b3
--- /dev/null
+++ b/tests/platform_tests/sfp/test_sfpshow.py
@@ -0,0 +1,56 @@
+"""
+Check SFP status using sfpshow.
+
+This script covers test case 'Check SFP status and configure SFP' in the SONiC platform test plan:
+https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
+"""
+
+import logging
+import pytest
+
+from util import parse_eeprom
+from util import parse_output
+from util import get_dev_conn
+
+cmd_sfp_presence = "sudo sfpshow presence"
+cmd_sfp_eeprom = "sudo sfpshow eeprom"
+
+
+pytestmark = [
+ pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
+ pytest.mark.topology('any')
+]
+
+
+def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
+ """
+ @summary: Check SFP presence using 'sfputil show presence'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
+
+ logging.info("Check output of '{}'".format(cmd_sfp_presence))
+ sfp_presence = duthost.command(cmd_sfp_presence)
+ parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
+ for intf in dev_conn:
+ assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence)
+ assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
+
+
+def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
+ """
+ @summary: Check SFP presence using 'sfputil show presence'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
+
+ logging.info("Check output of '{}'".format(cmd_sfp_eeprom))
+ sfp_eeprom = duthost.command(cmd_sfp_eeprom)
+ parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"])
+ for intf in dev_conn:
+ assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'"
+ assert parsed_eeprom[intf] == "SFP EEPROM detected"
diff --git a/tests/platform_tests/test_sfp.py b/tests/platform_tests/sfp/test_sfputil.py
similarity index 50%
rename from tests/platform_tests/test_sfp.py
rename to tests/platform_tests/sfp/test_sfputil.py
index 81b1d84e1ba..b8e2e68eaa8 100644
--- a/tests/platform_tests/test_sfp.py
+++ b/tests/platform_tests/sfp/test_sfputil.py
@@ -1,130 +1,73 @@
"""
-Check SFP status and configure SFP
+Check SFP status and configure SFP using sfputil.
This script covers test case 'Check SFP status and configure SFP' in the SONiC platform test plan:
https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
"""
+
import logging
-import re
-import os
import time
import copy
import pytest
-from tests.common.fixtures.conn_graph_facts import conn_graph_facts
-from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
-from tests.common.platform.interface_utils import get_port_map
-
-ans_host = None
+from util import parse_eeprom
+from util import parse_output
+from util import get_dev_conn
-def teardown_module():
- logging.info("remove script to retrieve port mapping")
- file_path = os.path.join('/usr/share/sonic/device', ans_host.facts['platform'], 'plugins/getportmap.py')
- ans_host.file(path=file_path, state='absent')
+cmd_sfp_presence = "sudo sfputil show presence"
+cmd_sfp_eeprom = "sudo sfputil show eeprom"
+cmd_sfp_reset = "sudo sfputil reset"
+cmd_sfp_show_lpmode = "sudo sfputil show lpmode"
+cmd_sfp_set_lpmode = "sudo sfputil lpmode"
pytestmark = [
pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
pytest.mark.topology('any')
]
-def parse_output(output_lines):
- """
- @summary: For parsing command output. The output lines should have format 'key value'.
- @param output_lines: Command output lines
- @return: Returns result in a dictionary
- """
- res = {}
- for line in output_lines:
- fields = line.split()
- if len(fields) != 2:
- continue
- res[fields[0]] = fields[1]
- return res
-
-def parse_eeprom(output_lines):
+def test_check_sfputil_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
"""
- @summary: Parse the SFP eeprom information from command output
- @param output_lines: Command output lines
- @return: Returns result in a dictionary
+ @summary: Check SFP presence using 'sfputil show presence'
"""
- res = {}
- for line in output_lines:
- if re.match(r"^Ethernet\d+: .*", line):
- fields = line.split(":")
- res[fields[0]] = fields[1].strip()
- return res
-
-def test_check_sfp_status_and_configure_sfp(duthosts, rand_one_dut_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo):
- """
- @summary: Check SFP status and configure SFP
-
- This case is to use the sfputil tool and show command to check SFP status and configure SFP. Currently the
- only configuration is to reset SFP. Commands to be tested:
- * sfputil show presence
- * show interface transceiver presence
- * sfputil show eeprom
- * show interface transceiver eeprom
- * sfputil reset
- """
- duthost = duthosts[rand_one_dut_hostname]
- if duthost.facts["asic_type"] in ["mellanox"]:
- loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='sfp_cfg')
- loganalyzer.load_common_config()
-
- loganalyzer.ignore_regex.append("kernel.*Eeprom query failed*")
- marker = loganalyzer.init()
-
- dev_conn = conn_graph_facts["device_conn"][duthost.hostname]
-
- # Get the interface pertaining to that asic
- portmap = get_port_map(duthost, enum_frontend_asic_index)
- logging.info("Got portmap {}".format(portmap))
-
- if enum_frontend_asic_index is not None:
- # Check if the interfaces of this AISC is present in conn_graph_facts
- dev_conn = {k:v for k, v in portmap.items() if k in conn_graph_facts["device_conn"][duthost.hostname]}
- logging.info("ASIC {} interface_list {}".format(enum_frontend_asic_index, dev_conn))
-
- cmd_sfp_presence = "sudo sfputil show presence"
- cmd_sfp_eeprom = "sudo sfputil show eeprom"
- cmd_sfp_reset = "sudo sfputil reset"
- cmd_xcvr_presence = "show interface transceiver presence"
- cmd_xcvr_eeprom = "show interface transceiver eeprom"
-
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
global ans_host
ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
- logging.info("Check output of '%s'" % cmd_sfp_presence)
+ logging.info("Check output of '{}'".format(cmd_sfp_presence))
sfp_presence = duthost.command(cmd_sfp_presence)
parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
for intf in dev_conn:
- assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
+ assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence)
assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
- logging.info("Check output of '%s'" % cmd_xcvr_presence)
- xcvr_presence = duthost.command(cmd_xcvr_presence)
- parsed_presence = parse_output(xcvr_presence["stdout_lines"][2:])
- for intf in dev_conn:
- assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_xcvr_presence
- assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
+def test_check_sfputil_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
+ """
+    @summary: Check SFP EEPROM using 'sfputil show eeprom'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
- logging.info("Check output of '%s'" % cmd_sfp_eeprom)
+ logging.info("Check output of '{}'".format(cmd_sfp_eeprom))
sfp_eeprom = duthost.command(cmd_sfp_eeprom)
parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"])
for intf in dev_conn:
assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'"
assert parsed_eeprom[intf] == "SFP EEPROM detected"
- logging.info("Check output of '%s'" % cmd_xcvr_eeprom)
- xcvr_eeprom = duthost.command(cmd_xcvr_eeprom)
- parsed_eeprom = parse_eeprom(xcvr_eeprom["stdout_lines"])
- for intf in dev_conn:
- assert intf in parsed_eeprom, "Interface is not in output of '%s'" % cmd_xcvr_eeprom
- assert parsed_eeprom[intf] == "SFP EEPROM detected"
- logging.info("Test '%s '" % cmd_sfp_reset)
+def test_check_sfputil_reset(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo):
+ """
+    @summary: Check SFP reset using 'sfputil reset'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
tested_physical_ports = set()
for intf in dev_conn:
phy_intf = portmap[intf][0]
@@ -133,8 +76,8 @@ def test_check_sfp_status_and_configure_sfp(duthosts, rand_one_dut_hostname, enu
continue
tested_physical_ports.add(phy_intf)
logging.info("resetting {} physical interface {}".format(intf, phy_intf))
- reset_result = duthost.command("%s %s" % (cmd_sfp_reset, intf))
- assert reset_result["rc"] == 0, "'%s %s' failed" % (cmd_sfp_reset, intf)
+ reset_result = duthost.command("{} {}".format(cmd_sfp_reset, intf))
+ assert reset_result["rc"] == 0, "'{} {}' failed".format(cmd_sfp_reset, intf)
time.sleep(5)
logging.info("Wait some time for SFP to fully recover after reset")
time.sleep(60)
@@ -143,26 +86,17 @@ def test_check_sfp_status_and_configure_sfp(duthosts, rand_one_dut_hostname, enu
sfp_presence = duthost.command(cmd_sfp_presence)
parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
for intf in dev_conn:
- assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
+ assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence)
assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
logging.info("Check interface status")
- namespace = duthost.get_namespace_from_asic_id(enum_frontend_asic_index)
mg_facts = duthost.get_extended_minigraph_facts(tbinfo)
- # TODO Remove this logic when minigraph facts supports namespace in multi_asic
- up_ports = mg_facts["minigraph_ports"]
- if enum_frontend_asic_index is not None:
- # Check if the interfaces of this AISC is present in conn_graph_facts
- up_ports = {k:v for k, v in portmap.items() if k in mg_facts["minigraph_ports"]}
- intf_facts = duthost.interface_facts(namespace=namespace, up_ports=up_ports)["ansible_facts"]
+ intf_facts = duthost.interface_facts(up_ports=mg_facts["minigraph_ports"])["ansible_facts"]
assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \
- "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"])
-
- if duthost.facts["asic_type"] in ["mellanox"]:
- loganalyzer.analyze(marker)
+ "Some interfaces are down: {}".format(intf_facts["ansible_interface_link_down_ports"])
-def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo):
+def test_check_sfputil_low_power_mode(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, tbinfo):
"""
@summary: Check SFP low power mode
@@ -171,39 +105,20 @@ def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend
* sfputil lpmode off
* sfputil lpmode on
"""
- duthost = duthosts[rand_one_dut_hostname]
- asichost = duthost.get_asic(enum_frontend_asic_index)
- if duthost.facts["asic_type"] in ["mellanox"]:
- loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix='sfp_lpm')
- loganalyzer.load_common_config()
-
- loganalyzer.ignore_regex.append("Eeprom query failed")
- marker = loganalyzer.init()
-
- dev_conn = conn_graph_facts["device_conn"][duthost.hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ asichost = duthost.asic_instance(enum_frontend_asic_index)
# Get the interface pertaining to that asic
- portmap = get_port_map(duthost, enum_frontend_asic_index)
- logging.info("Got portmap {}".format(portmap))
-
- if enum_frontend_asic_index is not None:
- # Check if the interfaces of this AISC is present in conn_graph_facts
- dev_conn = {k:v for k, v in portmap.items() if k in conn_graph_facts["device_conn"][duthost.hostname]}
- logging.info("ASIC {} interface_list {}".format(enum_frontend_asic_index, dev_conn))
-
- cmd_sfp_presence = "sudo sfputil show presence"
- cmd_sfp_show_lpmode = "sudo sfputil show lpmode"
- cmd_sfp_set_lpmode = "sudo sfputil lpmode"
-
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
global ans_host
ans_host = duthost
- logging.info("Check output of '%s'" % cmd_sfp_show_lpmode)
+ logging.info("Check output of '{}'".format(cmd_sfp_show_lpmode))
lpmode_show = duthost.command(cmd_sfp_show_lpmode)
parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
original_lpmode = copy.deepcopy(parsed_lpmode)
for intf in dev_conn:
- assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
+ assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode)
assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"
logging.info("Try to change SFP lpmode")
@@ -231,8 +146,8 @@ def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend
tested_physical_ports.add(phy_intf)
logging.info("setting {} physical interface {}".format(intf, phy_intf))
new_lpmode = "off" if original_lpmode[intf].lower() == "on" else "on"
- lpmode_set_result = duthost.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
- assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
+ lpmode_set_result = duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf))
+ assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf)
time.sleep(10)
if len(tested_physical_ports) == 0:
@@ -242,7 +157,7 @@ def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend
lpmode_show = duthost.command(cmd_sfp_show_lpmode)
parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
for intf in dev_conn:
- assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
+ assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode)
assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"
logging.info("Try to change SFP lpmode")
@@ -258,22 +173,22 @@ def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend
tested_physical_ports.add(phy_intf)
logging.info("restoring {} physical interface {}".format(intf, phy_intf))
new_lpmode = original_lpmode[intf].lower()
- lpmode_set_result = duthost.command("%s %s %s" % (cmd_sfp_set_lpmode, new_lpmode, intf))
- assert lpmode_set_result["rc"] == 0, "'%s %s %s' failed" % (cmd_sfp_set_lpmode, new_lpmode, intf)
+ lpmode_set_result = duthost.command("{} {} {}".format(cmd_sfp_set_lpmode, new_lpmode, intf))
+ assert lpmode_set_result["rc"] == 0, "'{} {} {}' failed".format(cmd_sfp_set_lpmode, new_lpmode, intf)
time.sleep(10)
logging.info("Check SFP lower power mode again after changing SFP lpmode")
lpmode_show = duthost.command(cmd_sfp_show_lpmode)
parsed_lpmode = parse_output(lpmode_show["stdout_lines"][2:])
for intf in dev_conn:
- assert intf in parsed_lpmode, "Interface is not in output of '%s'" % cmd_sfp_show_lpmode
+ assert intf in parsed_lpmode, "Interface is not in output of '{}'".format(cmd_sfp_show_lpmode)
assert parsed_lpmode[intf].lower() == "on" or parsed_lpmode[intf].lower() == "off", "Unexpected SFP lpmode"
logging.info("Check sfp presence again after setting lpmode")
sfp_presence = duthost.command(cmd_sfp_presence)
parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
for intf in dev_conn:
- assert intf in parsed_presence, "Interface is not in output of '%s'" % cmd_sfp_presence
+ assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence)
assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
logging.info("Check interface status")
@@ -286,7 +201,4 @@ def test_check_sfp_low_power_mode(duthosts, rand_one_dut_hostname, enum_frontend
up_ports = {k:v for k, v in portmap.items() if k in mg_facts["minigraph_ports"]}
intf_facts = duthost.interface_facts(namespace=namespace, up_ports=up_ports)["ansible_facts"]
assert len(intf_facts["ansible_interface_link_down_ports"]) == 0, \
- "Some interfaces are down: %s" % str(intf_facts["ansible_interface_link_down_ports"])
-
- if duthost.facts["asic_type"] in ["mellanox"]:
- loganalyzer.analyze(marker)
+ "Some interfaces are down: {}".format(intf_facts["ansible_interface_link_down_ports"])
diff --git a/tests/platform_tests/sfp/test_show_intf_xcvr.py b/tests/platform_tests/sfp/test_show_intf_xcvr.py
new file mode 100644
index 00000000000..40d3b0d459f
--- /dev/null
+++ b/tests/platform_tests/sfp/test_show_intf_xcvr.py
@@ -0,0 +1,55 @@
+"""
+Check SFP status using 'show interface transceiver'.
+
+This script covers test case 'Check SFP status and configure SFP' in the SONiC platform test plan:
+https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
+"""
+
+import logging
+import pytest
+
+from util import parse_eeprom
+from util import parse_output
+from util import get_dev_conn
+
+cmd_sfp_presence = "show interface transceiver presence"
+cmd_sfp_eeprom = "show interface transceiver eeprom"
+
+pytestmark = [
+ pytest.mark.disable_loganalyzer, # disable automatic loganalyzer
+ pytest.mark.topology('any')
+]
+
+
+def test_check_sfp_presence(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
+ """
+    @summary: Check SFP presence using 'show interface transceiver presence'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
+
+ logging.info("Check output of '{}'".format(cmd_sfp_presence))
+ sfp_presence = duthost.command(cmd_sfp_presence)
+ parsed_presence = parse_output(sfp_presence["stdout_lines"][2:])
+ for intf in dev_conn:
+ assert intf in parsed_presence, "Interface is not in output of '{}'".format(cmd_sfp_presence)
+ assert parsed_presence[intf] == "Present", "Interface presence is not 'Present'"
+
+
+def test_check_sfpshow_eeprom(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts):
+ """
+    @summary: Check SFP EEPROM using 'show interface transceiver eeprom'
+ """
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ global ans_host
+ ans_host = duthost
+ portmap, dev_conn = get_dev_conn(duthost, conn_graph_facts, enum_frontend_asic_index)
+
+ logging.info("Check output of '{}'".format(cmd_sfp_eeprom))
+ sfp_eeprom = duthost.command(cmd_sfp_eeprom)
+ parsed_eeprom = parse_eeprom(sfp_eeprom["stdout_lines"])
+ for intf in dev_conn:
+ assert intf in parsed_eeprom, "Interface is not in output of 'sfputil show eeprom'"
+ assert parsed_eeprom[intf] == "SFP EEPROM detected"
diff --git a/tests/platform_tests/sfp/util.py b/tests/platform_tests/sfp/util.py
new file mode 100644
index 00000000000..f0cc2b12e2e
--- /dev/null
+++ b/tests/platform_tests/sfp/util.py
@@ -0,0 +1,47 @@
+import re
+import logging
+from tests.common.platform.interface_utils import get_port_map
+
+
+def parse_output(output_lines):
+ """
+ @summary: For parsing command output. The output lines should have format 'key value'.
+ @param output_lines: Command output lines
+ @return: Returns result in a dictionary
+ """
+ res = {}
+ for line in output_lines:
+ fields = line.split()
+ if len(fields) != 2:
+ continue
+ res[fields[0]] = fields[1]
+ return res
+
+
+def parse_eeprom(output_lines):
+ """
+ @summary: Parse the SFP eeprom information from command output
+ @param output_lines: Command output lines
+ @return: Returns result in a dictionary
+ """
+ res = {}
+ for line in output_lines:
+ if re.match(r"^Ethernet\d+: .*", line):
+ fields = line.split(":")
+ res[fields[0]] = fields[1].strip()
+ return res
+
+
+def get_dev_conn(duthost, conn_graph_facts, asic_index):
+ dev_conn = conn_graph_facts["device_conn"][duthost.hostname]
+
+ # Get the interface pertaining to that asic
+ portmap = get_port_map(duthost, asic_index)
+ logging.info("Got portmap {}".format(portmap))
+
+ if asic_index is not None:
+        # Check if the interfaces of this ASIC are present in conn_graph_facts
+ dev_conn = {k: v for k, v in portmap.items() if k in conn_graph_facts["device_conn"][duthost.hostname]}
+ logging.info("ASIC {} interface_list {}".format(asic_index, dev_conn))
+
+ return portmap, dev_conn
\ No newline at end of file
diff --git a/tests/platform_tests/test_platform_info.py b/tests/platform_tests/test_platform_info.py
index aaa0d755536..1327670dacd 100644
--- a/tests/platform_tests/test_platform_info.py
+++ b/tests/platform_tests/test_platform_info.py
@@ -4,6 +4,7 @@
This script covers the test case 'Check platform information' in the SONiC platform test plan:
https://github.com/Azure/SONiC/blob/master/doc/pmon/sonic_platform_test_plan.md
"""
+import json
import logging
import re
import time
@@ -20,6 +21,7 @@
]
CMD_PLATFORM_PSUSTATUS = "show platform psustatus"
+CMD_PLATFORM_PSUSTATUS_JSON = "{} --json".format(CMD_PLATFORM_PSUSTATUS)
CMD_PLATFORM_FANSTATUS = "show platform fan"
CMD_PLATFORM_TEMPER = "show platform temperature"
@@ -145,7 +147,11 @@ def check_vendor_specific_psustatus(dut, psu_status_line):
if dut.facts["asic_type"] in ["mellanox"]:
from .mellanox.check_sysfs import check_psu_sysfs
- psu_line_pattern = re.compile(r"PSU\s+(\d)+\s+(OK|NOT OK|NOT PRESENT)")
+ if "201811" in dut.os_version or "201911" in dut.os_version:
+ psu_line_pattern = re.compile(r"PSU\s+(\d)+\s+(OK|NOT OK|NOT PRESENT)")
+ else:
+ psu_line_pattern = re.compile(r"PSU\s+(\d+)\s+\w+\s+\w+\s+\w+\s+\w+\s+\w+\s+(OK|NOT OK|NOT PRESENT)\s+(green|amber|red|off)")
+
psu_match = psu_line_pattern.match(psu_status_line)
psu_id = psu_match.group(1)
psu_status = psu_match.group(2)
@@ -158,21 +164,30 @@ def turn_all_outlets_on(pdu_ctrl):
pytest_require(all_outlet_status and len(all_outlet_status) >= 2, 'Skip the test, cannot to get at least 2 outlet status: {}'.format(all_outlet_status))
for outlet in all_outlet_status:
if not outlet["outlet_on"]:
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
def check_all_psu_on(dut, psu_test_results):
- cli_psu_status = dut.command(CMD_PLATFORM_PSUSTATUS)
power_off_psu_list = []
- for line in cli_psu_status["stdout_lines"][2:]:
- fields = line.split()
- psu_test_results[fields[1]] = False
- if " ".join(fields[2:]) == "NOT OK":
- power_off_psu_list.append(fields[1])
+
+ if "201811" in dut.os_version or "201911" in dut.os_version:
+ cli_psu_status = dut.command(CMD_PLATFORM_PSUSTATUS)
+ for line in cli_psu_status["stdout_lines"][2:]:
+ fields = line.split()
+ psu_test_results[fields[1]] = False
+ if " ".join(fields[2:]) == "NOT OK":
+ power_off_psu_list.append(fields[1])
+ else:
+ # Use JSON output
+ cli_psu_status = dut.command(CMD_PLATFORM_PSUSTATUS_JSON)
+ psu_info_list = json.loads(cli_psu_status["stdout"])
+ for psu_info in psu_info_list:
+ if psu_info["status"] == "NOT OK":
+ power_off_psu_list.append(psu_info["index"])
if power_off_psu_list:
- logging.warn('Power off PSU list: {}'.format(power_off_psu_list))
+ logging.warn('Powered off PSUs: {}'.format(power_off_psu_list))
return len(power_off_psu_list) == 0
@@ -210,8 +225,8 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, rand_one_dut_hostname, pd
for outlet in all_outlet_status:
psu_under_test = None
- logging.info("Turn off outlet %s" % str(outlet["outlet_id"]))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ logging.info("Turn off outlet {}".format(outlet))
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(5)
cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
@@ -223,8 +238,8 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, rand_one_dut_hostname, pd
check_vendor_specific_psustatus(duthost, line)
pytest_assert(psu_under_test is not None, "No PSU is turned off")
- logging.info("Turn on outlet %s" % str(outlet["outlet_id"]))
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ logging.info("Turn on outlet {}".format(outlet))
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
cli_psu_status = duthost.command(CMD_PLATFORM_PSUSTATUS)
@@ -242,24 +257,22 @@ def test_turn_on_off_psu_and_check_psustatus(duthosts, rand_one_dut_hostname, pd
@pytest.mark.disable_loganalyzer
-def test_show_platform_fanstatus_mocked(duthosts, rand_one_dut_hostname, mocker_factory):
+def test_show_platform_fanstatus_mocked(duthosts, rand_one_dut_hostname, mocker_factory, disable_thermal_policy):
"""
@summary: Check output of 'show platform fan'.
"""
duthost = duthosts[rand_one_dut_hostname]
- # Load an invalid thermal control configuration file here to avoid thermal policy affect the test result
- with ThermalPolicyFileContext(duthost, THERMAL_POLICY_INVALID_FORMAT_FILE):
- # Mock data and check
- mocker = mocker_factory(duthost, 'FanStatusMocker')
- pytest_require(mocker, "No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type'])
+ # Mock data and check
+ mocker = mocker_factory(duthost, 'FanStatusMocker')
+ pytest_require(mocker, "No FanStatusMocker for %s, skip rest of the testing in this case" % duthost.facts['asic_type'])
- logging.info('Mock FAN status data...')
- mocker.mock_data()
- logging.info('Wait and check actual data with mocked FAN status data...')
- result = check_cli_output_with_mocker(duthost, mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2)
+ logging.info('Mock FAN status data...')
+ mocker.mock_data()
+ logging.info('Wait and check actual data with mocked FAN status data...')
+ result = check_cli_output_with_mocker(duthost, mocker, CMD_PLATFORM_FANSTATUS, THERMAL_CONTROL_TEST_WAIT_TIME, 2)
- pytest_assert(result, 'FAN mock data mismatch')
+ pytest_assert(result, 'FAN mock data mismatch')
@pytest.mark.disable_loganalyzer
@@ -379,8 +392,8 @@ def turn_off_outlet_and_check_thermal_control(dut, pdu_ctrl, outlet, mocker):
@summary: Turn off PSUs, check all FAN speed are set to 100% according to thermal
control policy file.
"""
- logging.info("Turn off outlet %s" % str(outlet["psu_id"]))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ logging.info("Turn off outlet %s" % str(outlet["outlet_id"]))
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(5)
psu_under_test = None
@@ -399,7 +412,7 @@ def turn_off_outlet_and_check_thermal_control(dut, pdu_ctrl, outlet, mocker):
mocker.check_all_fan_speed,
100), 'FAN speed not turn to 100% after PSU off')
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
time.sleep(5)
diff --git a/tests/platform_tests/test_power_budget_info.py b/tests/platform_tests/test_power_budget_info.py
new file mode 100644
index 00000000000..e0002a25ab3
--- /dev/null
+++ b/tests/platform_tests/test_power_budget_info.py
@@ -0,0 +1,60 @@
+import logging
+import re
+import json
+import pytest
+from tests.common.helpers.assertions import pytest_assert
+from cli.util import get_skip_mod_list
+
+logger = logging.getLogger('__name__')
+
+pytestmark = [
+ pytest.mark.topology('t2')
+]
+
+
+def test_power_redis_db(duthosts, enum_supervisor_dut_hostname, tbinfo):
+ """
+ @summary: verify the output for power budget policy using
+ redis command for chassis
+ checks for each psu the supplied power
+ checks consumed power for each present module
+ """
+ logger.info("verifying redis dump for power budget")
+ duthost = duthosts[enum_supervisor_dut_hostname]
+ skip_mod_list = get_skip_mod_list(duthost)
+ exp_total_supp_power = 0
+ exp_total_cons_power = 0
+
+ redis_out = duthost.command("redis-dump -d 6 -y -k \"*power*\"")
+ out_dict = json.loads(redis_out['stdout'])
+ power_budget = out_dict.keys()
+
+ for pb_name in power_budget:
+ for out_val in out_dict[pb_name]['value']:
+ if re.match('Supplied Power', out_val):
+ n_psu = (re.split('Supplied Power ', out_val))[1]
+ if n_psu not in skip_mod_list:
+ sup_power = float(out_dict[pb_name]['value'][out_val])
+ pytest_assert(sup_power > 0,
+ "expected supplied power value for psu {} greater than 0 but got {}".format(n_psu, sup_power))
+ exp_total_supp_power += sup_power
+ else:
+ logger.debug("psu {} in skip list skipping check".format(n_psu))
+
+ elif re.match('Consumed Power', out_val):
+ mod_name = (re.split('Consumed Power', out_val))[1]
+ cons_power = float(out_dict[pb_name]['value'][out_val])
+ exp_total_cons_power += cons_power
+ if mod_name not in skip_mod_list:
+ pytest_assert(cons_power > 0,
+ "power consumed values is not expected to be 0 or less for {}".format(mod_name))
+
+ logger.info("verfying total supplied power is expected")
+ tot_supp_power = float(out_dict[pb_name]['value']['Total Supplied Power'])
+ tot_cons_power = float(out_dict[pb_name]['value']['Total Consumed Power'])
+ pytest_assert(exp_total_cons_power == tot_cons_power,
+ "total consumed power is incorrect expected is {} reported is {}".format(
+ exp_total_cons_power, tot_cons_power))
+ pytest_assert(exp_total_supp_power == tot_supp_power,
+ "total supplied power is not correct expected is {} reported is {}".format(
+ exp_total_supp_power, tot_supp_power))
diff --git a/tests/platform_tests/test_reboot.py b/tests/platform_tests/test_reboot.py
index 6deebc2b9f0..5eb4d423bac 100644
--- a/tests/platform_tests/test_reboot.py
+++ b/tests/platform_tests/test_reboot.py
@@ -116,9 +116,26 @@ def test_cold_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_fact
reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_COLD)
+def test_soft_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
+ """
+ @summary: This test case is to perform soft reboot and check platform status
+ """
+
+ duthost = duthosts[rand_one_dut_hostname]
+
+ soft_reboot_supported = duthost.command('which soft-reboot', module_ignore_errors=True)["stdout"]
+ if "" == soft_reboot_supported:
+ pytest.skip("Soft-reboot is not supported on this DUT, skip this test case")
+
+ if duthost.is_multi_asic:
+ pytest.skip("Multi-ASIC devices not supporting soft reboot")
+
+ reboot_and_check(localhost, duthost, conn_graph_facts["device_conn"][duthost.hostname], xcvr_skip_list, reboot_type=REBOOT_TYPE_SOFT)
+
+
def test_fast_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
"""
- @summary: This test case is to perform cold reboot and check platform status
+ @summary: This test case is to perform fast reboot and check platform status
"""
duthost = duthosts[rand_one_dut_hostname]
@@ -131,7 +148,7 @@ def test_fast_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_fact
def test_warm_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list):
"""
- @summary: This test case is to perform cold reboot and check platform status
+ @summary: This test case is to perform warm reboot and check platform status
"""
duthost = duthosts[rand_one_dut_hostname]
@@ -161,12 +178,12 @@ def _power_off_reboot_helper(kwargs):
for outlet in all_outlets:
logging.debug("turning off {}".format(outlet))
- pdu_ctrl.turn_off_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_off_outlet(outlet)
time.sleep(delay_time)
logging.info("Power on {}".format(power_on_seq))
for outlet in power_on_seq:
logging.debug("turning on {}".format(outlet))
- pdu_ctrl.turn_on_outlet(outlet["outlet_id"])
+ pdu_ctrl.turn_on_outlet(outlet)
def test_power_off_reboot(duthosts, rand_one_dut_hostname, localhost, conn_graph_facts, xcvr_skip_list, pdu_controller, power_off_delay):
diff --git a/tests/platform_tests/test_sequential_restart.py b/tests/platform_tests/test_sequential_restart.py
index 52a2e0faab1..aae49c089f2 100644
--- a/tests/platform_tests/test_sequential_restart.py
+++ b/tests/platform_tests/test_sequential_restart.py
@@ -52,8 +52,8 @@ def restart_service_and_check(localhost, dut, enum_frontend_asic_index, service,
"""
logging.info("Restart the %s service on asic %s" %(service, enum_frontend_asic_index))
- asichost = dut.get_asic(enum_frontend_asic_index)
- service_name = asichost.get_service_name(service)
+ asichost = dut.asic_instance(enum_frontend_asic_index)
+ service_name = asichost.get_docker_name(service)
dut.command("sudo systemctl restart {}".format(service_name))
for container in dut.get_default_critical_services_list():
diff --git a/tests/platform_tests/test_xcvr_info_in_db.py b/tests/platform_tests/test_xcvr_info_in_db.py
index 8dfb6d999ba..ef3000dc770 100644
--- a/tests/platform_tests/test_xcvr_info_in_db.py
+++ b/tests/platform_tests/test_xcvr_info_in_db.py
@@ -16,11 +16,11 @@
pytest.mark.topology('any')
]
-def test_xcvr_info_in_db(duthosts, rand_one_dut_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list):
+def test_xcvr_info_in_db(duthosts, enum_rand_one_per_hwsku_frontend_hostname, enum_frontend_asic_index, conn_graph_facts, xcvr_skip_list):
"""
@summary: This test case is to verify that xcvrd works as expected by checking transceiver information in DB
"""
- duthost = duthosts[rand_one_dut_hostname]
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
logging.info("Check transceiver status")
all_interfaces = conn_graph_facts["device_conn"][duthost.hostname]
diff --git a/tests/platform_tests/thermal_control_test_helper.py b/tests/platform_tests/thermal_control_test_helper.py
index 9dd62f7fd8d..a05d0f875fb 100644
--- a/tests/platform_tests/thermal_control_test_helper.py
+++ b/tests/platform_tests/thermal_control_test_helper.py
@@ -337,3 +337,20 @@ def __exit__(self, exc_type, exc_val, exc_tb):
"""
self.dut.command('mv -f {} {}'.format(self.thermal_policy_file_backup_path, self.thermal_policy_file_path))
restart_thermal_control_daemon(self.dut)
+
+
+@pytest.fixture
+def disable_thermal_policy(duthosts, rand_one_dut_hostname):
+ """Fixture to help disable thermal policy during the test. After test, it will
+ automatically re-enable thermal policy. The idea here is to make thermalctld
+ load a invalid policy file. To use this fixture, the test case will probably
+ marked as @pytest.mark.disable_loganalyzer.
+
+ Args:
+        duthosts: Fixture providing the DUT hosts under test.
+        rand_one_dut_hostname: Hostname of a randomly selected DUT.
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ invalid_policy_file = os.path.join(FILES_DIR, 'invalid_format_policy.json')
+ with ThermalPolicyFileContext(duthost, invalid_policy_file):
+ yield
diff --git a/tests/process_monitoring/test_critical_process_monitoring.py b/tests/process_monitoring/test_critical_process_monitoring.py
new file mode 100755
index 00000000000..75426848375
--- /dev/null
+++ b/tests/process_monitoring/test_critical_process_monitoring.py
@@ -0,0 +1,403 @@
+"""
+Test the feature of monitoring critical processes by Supervisord.
+"""
+from collections import defaultdict
+import logging
+import time
+
+import pytest
+
+from pkg_resources import parse_version
+from tests.common import config_reload
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.assertions import pytest_require
+from tests.common.helpers.constants import DEFAULT_ASIC_ID, NAMESPACE_PREFIX
+from tests.common.helpers.dut_utils import get_program_info
+from tests.common.helpers.dut_utils import get_group_program_info
+from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
+from tests.common.utilities import wait_until
+
+logger = logging.getLogger(__name__)
+
+pytestmark = [
+ pytest.mark.topology('any'),
+ pytest.mark.disable_loganalyzer
+]
+
+CONTAINER_CHECK_INTERVAL_SECS = 1
+CONTAINER_RESTART_THRESHOLD_SECS = 180
+
+
+@pytest.fixture(autouse=True, scope='module')
+def config_reload_after_tests(duthost):
+ yield
+ config_reload(duthost)
+
+
+@pytest.fixture(autouse=True, scope='module')
+def disable_and_enable_autorestart(duthost):
+    """Changes the autorestart of containers from `enabled` to `disabled` before testing
+    and rolls them back after testing.
+
+ Args:
+ duthost: Hostname of DUT.
+
+ Returns:
+ None.
+ """
+ containers_autorestart_states = duthost.get_container_autorestart_states()
+ disabled_autorestart_containers = []
+
+ for container_name, state in containers_autorestart_states.items():
+ if state == "enabled":
+ logger.info("Disabling the autorestart of container '{}'.".format(container_name))
+ command_disable_autorestart = "sudo config feature autorestart {} disabled".format(container_name)
+ command_output = duthost.shell(command_disable_autorestart)
+ exit_code = command_output["rc"]
+ pytest_assert(exit_code == 0, "Failed to disable the autorestart of container '{}'".format(container_name))
+ logger.info("The autorestart of container '{}' was disabled.".format(container_name))
+ disabled_autorestart_containers.append(container_name)
+
+ yield
+
+ for container_name in disabled_autorestart_containers:
+ logger.info("Enabling the autorestart of container '{}'...".format(container_name))
+ command_output = duthost.shell("sudo config feature autorestart {} enabled".format(container_name))
+ exit_code = command_output["rc"]
+ pytest_assert(exit_code == 0, "Failed to enable the autorestart of container '{}'".format(container_name))
+ logger.info("The autorestart of container '{}' is enabled.".format(container_name))
+
+
+@pytest.fixture(autouse=True, scope="module")
+def check_image_version(duthost):
+    """Skips this test if the SONiC image installed on DUT is 201911 or an older version.
+
+ Args:
+ duthost: Hostname of DUT.
+
+ Returns:
+ None.
+ """
+ pytest_require(parse_version(duthost.kernel_version) > parse_version("4.9.0"),
+ "Test was not supported for 201911 and older image versions!")
+
+
+def check_all_critical_processes_running(duthost):
+ """Determine whether all critical processes are running on a DUT.
+
+ Args:
+ duthost: Hostname of DUT.
+
+ Returns:
+ This function will return True if all critical processes are running.
+ Otherwise it will return False.
+ """
+ processes_status = duthost.all_critical_process_status()
+ for container_name, processes in processes_status.items():
+ if processes["status"] is False or len(processes["exited_critical_process"]) > 0:
+ return False
+
+ return True
+
+
+def post_test_check(duthost, up_bgp_neighbors):
+ """Post-checks the status of critical processes and state of BGP sessions.
+
+ Args:
+ duthost: Hostname of DUT.
+ up_bgp_neighbors: An IP list contains the established BGP sessions with
+ this DUT.
+
+ Returns:
+ This function will return True if all critical processes are running and
+ all BGP sessions are established. Otherwise it will return False.
+ """
+ return check_all_critical_processes_running(duthost) and duthost.check_bgp_session_state(up_bgp_neighbors, "established")
+
+
+def postcheck_critical_processes_status(duthost, up_bgp_neighbors):
+ """Calls the sub-functions to post-check the status of critical processes and
+ state of BGP sessions.
+
+ Args:
+ duthost: Hostname of DUT.
+ up_bgp_neighbors: An IP list contains the established BGP sessions with
+ this DUT.
+
+ Returns:
+ If all critical processes are running and all BGP sessions are established, it
+ returns True. Otherwise it will call the function to do post-check every 30 seconds
+ for 3 minutes. It will return False after timeout.
+ """
+ logger.info("Post-checking status of critical processes and BGP sessions...")
+ return wait_until(CONTAINER_RESTART_THRESHOLD_SECS, CONTAINER_CHECK_INTERVAL_SECS,
+ post_test_check, duthost, up_bgp_neighbors)
+
+
+def get_expected_alerting_messages(duthost, containers_in_namespaces):
+ """Generates the regex of expected alerting messages for the critical processes in each namespace.
+
+ Args:
+ duthost: Hostname of DUT.
+ containers_in_namespaces: A dictionary where keys are container names and
+ values are lists which contains ids of namespaces this container should reside in.
+
+    Returns:
+        A list of regex strings matching the expected alerting messages.
+ """
+ expected_alerting_messages = []
+
+ for container_name in containers_in_namespaces.keys():
+ logger.info("Generating the expected alerting messages for container '{}'...".format(container_name))
+ critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)
+ pytest_assert(succeeded, "Failed to get critical group and process lists of container '{}'".format(container_name))
+
+ namespace_ids = containers_in_namespaces[container_name]
+ for namespace_id in namespace_ids:
+ namespace_name = "host"
+ if namespace_id != DEFAULT_ASIC_ID:
+ namespace_name = NAMESPACE_PREFIX + namespace_id
+
+ for critical_process in critical_process_list:
+ # Skip 'dsserve' process since it was not managed by supervisord
+ # TODO: Should remove the following two lines once the issue was solved in the image.
+ if container_name == "syncd" and critical_process == "dsserve":
+ continue
+ logger.info("Generating the expected alerting message for process '{}'".format(critical_process))
+ expected_alerting_messages.append(".*Process '{}' is not running in namespace '{}'.*".format(critical_process, namespace_name))
+
+ for critical_group in critical_group_list:
+ group_program_info = get_group_program_info(duthost, container_name, critical_group)
+ for program_name in group_program_info:
+ logger.info("Generating the expected alerting message for process '{}'".format(program_name))
+ expected_alerting_messages.append(".*Process '{}' is not running in namespace '{}'.*".format(program_name, namespace_name))
+
+ logger.info("Generating the expected alerting messages for container '{}' was done!".format(container_name))
+
+ return expected_alerting_messages
+
+
+def get_containers_namespace_ids(duthost, skip_containers):
+ """
+ This function will get namespace ids for each running container.
+
+ Args:
+ duthost: Hostname of DUT.
+ skip_containers: A list shows which containers should be skipped for testing.
+
+ Returns:
+ A dictionary where keys are container names and values are a list which contains
+ ids of namespaces this container should reside in such as {lldp: [DEFAULT_ASIC_ID, "0", "1"]}
+ """
+ containers_in_namespaces = defaultdict(list)
+
+ logger.info("Getting the namespace ids for each container...")
+ containers_states, succeed = duthost.get_feature_status()
+ pytest_assert(succeed, "Failed to get feature status of containers!")
+
+ for container_name, state in containers_states.items():
+ if container_name not in skip_containers and state == "enabled":
+ namespace_ids, succeed = duthost.get_namespace_ids(container_name)
+ pytest_assert(succeed, "Failed to get namespace ids of container '{}'".format(container_name))
+ containers_in_namespaces[container_name] = namespace_ids
+
+ logger.info("Getting the namespace ids for each container was done!")
+
+ return containers_in_namespaces
+
+
+def kill_process_by_pid(duthost, container_name, program_name, program_pid):
+ """Kills a process in the specified container by its pid.
+
+ Args:
+ duthost: Hostname of DUT.
+ container_name: A string shows container name.
+ program_name: A string shows process name.
+ program_pid: An integer represents the PID of a process.
+
+ Returns:
+ None.
+ """
+ kill_cmd_result = duthost.shell("docker exec {} kill -SIGKILL {}".format(container_name, program_pid))
+
+ # Get the exit code of 'kill' command
+ exit_code = kill_cmd_result["rc"]
+ pytest_assert(exit_code == 0, "Failed to stop program '{}' before test".format(program_name))
+
+ logger.info("Program '{}' in container '{}' was stopped successfully"
+ .format(program_name, container_name))
+
+
+def check_and_kill_process(duthost, container_name, program_name, program_status, program_pid):
+ """Checks the running status of a critical process. If it is running, kill it. Otherwise,
+ fail this test.
+
+ Args:
+ duthost: Hostname of DUT.
+ container_name: A string shows container name.
+ program_name: A string shows process name.
+ program_pid: An integer represents the PID of a process.
+
+ Returns:
+ None.
+ """
+ if program_status == "RUNNING":
+ kill_process_by_pid(duthost, container_name, program_name, program_pid)
+ elif program_status in ["EXITED", "STOPPED", "STARTING"]:
+ pytest.fail("Program '{}' in container '{}' is in the '{}' state, expected 'RUNNING'"
+ .format(program_name, container_name, program_status))
+ else:
+ pytest.fail("Failed to find program '{}' in container '{}'"
+ .format(program_name, container_name))
+
+
+def stop_critical_processes(duthost, containers_in_namespaces):
+ """Gets critical processes of each running container and then stops them from running.
+
+ Args:
+ duthost: Hostname of DUT.
+ containers_in_namespaces: A dictionary where keys are container names and
+ values are lists which contains ids of namespaces this container should reside in.
+
+ Returns:
+ None.
+ """
+ for container_name in containers_in_namespaces.keys():
+ critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)
+ pytest_assert(succeeded, "Failed to get critical group and process lists of container '{}'".format(container_name))
+
+ namespace_ids = containers_in_namespaces[container_name]
+ for namespace_id in namespace_ids:
+ container_name_in_namespace = container_name
+ if namespace_id != DEFAULT_ASIC_ID:
+ container_name_in_namespace += namespace_id
+
+ for critical_process in critical_process_list:
+ # Skip 'dsserve' process since it was not managed by supervisord
+ # TODO: Should remove the following two lines once the issue was solved in the image.
+ if container_name_in_namespace == "syncd" and critical_process == "dsserve":
+ continue
+
+ program_status, program_pid = get_program_info(duthost, container_name_in_namespace, critical_process)
+ check_and_kill_process(duthost, container_name_in_namespace, critical_process, program_status, program_pid)
+
+ for critical_group in critical_group_list:
+ group_program_info = get_group_program_info(duthost, container_name, critical_group)
+ for program_name in group_program_info:
+ check_and_kill_process(duthost, container_name_in_namespace, program_name,
+ group_program_info[program_name][0],
+ group_program_info[program_name][1])
+
+
+def ensure_process_is_running(duthost, container_name, critical_process):
+ """Checks the running status of a critical process and starts it if it was not running.
+
+ Args:
+ duthost: Hostname of DUT.
+ container_name: A string shows name of a container.
+ critical_process: A string shows name of a process.
+
+ Returns:
+ None.
+ """
+ logger.info("Checking whether process '{}' in container '{}' is running...".format(critical_process, container_name))
+ program_status, program_pid = get_program_info(duthost, container_name, critical_process)
+ if program_status == "RUNNING":
+        logger.info("Process '{}' in container '{}' is running.".format(critical_process, container_name))
+ else:
+ logger.info("Process '{}' in container '{}' is not running and start it...".format(critical_process, container_name))
+ command_output = duthost.shell("docker exec {} supervisorctl start {}".format(container_name, critical_process))
+ if command_output["rc"] == 0:
+ logger.info("Process '{}' in container '{}' is started.".format(critical_process, container_name))
+ else:
+ pytest.fail("Failed to start process '{}' in container '{}'.".format(critical_process, container_name))
+
+
+def ensure_all_critical_processes_running(duthost, containers_in_namespaces):
+ """Checks whether each critical process is running and starts it if it is not running.
+
+ Args:
+ duthost: Hostname of DUT.
+ containers_in_namespaces: A dictionary where keys are container names and
+ values are lists which contains ids of namespaces this container should reside in.
+
+ Returns:
+ None.
+ """
+ for container_name in containers_in_namespaces.keys():
+ critical_group_list, critical_process_list, succeeded = duthost.get_critical_group_and_process_lists(container_name)
+ pytest_assert(succeeded, "Failed to get critical group and process lists of container '{}'".format(container_name))
+
+ namespace_ids = containers_in_namespaces[container_name]
+ for namespace_id in namespace_ids:
+ container_name_in_namespace = container_name
+ if namespace_id != DEFAULT_ASIC_ID:
+ container_name_in_namespace += namespace_id
+
+ for critical_process in critical_process_list:
+ # Skip 'dsserve' process since it was not managed by supervisord
+ # TODO: Should remove the following two lines once the issue was solved in the image.
+ if container_name_in_namespace == "syncd" and critical_process == "dsserve":
+ continue
+
+ ensure_process_is_running(duthost, container_name_in_namespace, critical_process)
+
+ for critical_group in critical_group_list:
+ group_program_info = get_group_program_info(duthost, container_name_in_namespace, critical_group)
+ for program_name in group_program_info:
+ ensure_process_is_running(duthost, container_name_in_namespace, program_name)
+
+
+def test_monitoring_critical_processes(duthosts, rand_one_dut_hostname, tbinfo):
+ """Tests the feature of monitoring critical processes with Supervisord.
+
+ This function will check whether names of critical processes will appear
+ in the syslog if the autorestart were disabled and these critical processes
+ were stopped.
+
+ Args:
+ duthosts: list of DUTs.
+ rand_one_dut_hostname: hostname of DUT.
+ tbinfo: Testbed information.
+
+ Returns:
+ None.
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix="monitoring_critical_processes")
+ loganalyzer.expect_regex = []
+ bgp_neighbors = duthost.get_bgp_neighbors()
+ up_bgp_neighbors = [ k.lower() for k, v in bgp_neighbors.items() if v["state"] == "established" ]
+
+ skip_containers = []
+ skip_containers.append("database")
+ skip_containers.append("gbsyncd")
+ # Skip 'radv' container on devices whose role is not T0.
+ if tbinfo["topo"]["type"] != "t0":
+ skip_containers.append("radv")
+
+ containers_in_namespaces = get_containers_namespace_ids(duthost, skip_containers)
+
+ expected_alerting_messages = get_expected_alerting_messages(duthost, containers_in_namespaces)
+ loganalyzer.expect_regex.extend(expected_alerting_messages)
+ marker = loganalyzer.init()
+
+ stop_critical_processes(duthost, containers_in_namespaces)
+
+ # Wait for 70 seconds such that Supervisord has a chance to write alerting message into syslog.
+ logger.info("Sleep 70 seconds to wait for the alerting message...")
+ time.sleep(70)
+
+ logger.info("Checking the alerting messages from syslog...")
+ loganalyzer.analyze(marker)
+ logger.info("Found all the expected alerting messages from syslog!")
+
+ logger.info("Executing the config reload...")
+ config_reload(duthost)
+ logger.info("Executing the config reload was done!")
+
+ ensure_all_critical_processes_running(duthost, containers_in_namespaces)
+
+ if not postcheck_critical_processes_status(duthost, up_bgp_neighbors):
+ pytest.fail("Post-check failed after testing the container checker!")
+ logger.info("Post-checking status of critical processes and BGP sessions was done!")
diff --git a/tests/qos/args/qos_sai_args.py b/tests/qos/args/qos_sai_args.py
index ac76b3b7504..29b193649a6 100644
--- a/tests/qos/args/qos_sai_args.py
+++ b/tests/qos/args/qos_sai_args.py
@@ -32,7 +32,7 @@ def add_qos_sai_args(parser):
"--qos_dst_ports",
action="store",
type=lambda opt_value: [int(v) for v in opt_value.translate(None, "[]").split(',')],
- default=[0, 1, 3],
+ default=None,
help="QoS SAI comma separated list of destination ports. Test currently expects exactly 3 destination ports",
)
@@ -40,7 +40,7 @@ def add_qos_sai_args(parser):
"--qos_src_ports",
action="store",
type=lambda opt_value: [int(v) for v in opt_value.translate(None, "[]").split(',')],
- default=[2],
+ default=None,
help="QoS SAI comma separated list of source ports. Test currently expects exactly 1 source port",
)
diff --git a/tests/qos/files/dynamic_buffer_param.json b/tests/qos/files/dynamic_buffer_param.json
index 98dab966197..50bdc2c18ad 100644
--- a/tests/qos/files/dynamic_buffer_param.json
+++ b/tests/qos/files/dynamic_buffer_param.json
@@ -24,6 +24,9 @@
"non-default-dynamic_th": {
"dynamic_th": "2"
}
+ },
+ "shared-headroom-pool": {
+ "size": "1024000"
}
}
}
diff --git a/tests/qos/qos_sai_base.py b/tests/qos/qos_sai_base.py
index 0c3e18d858a..b690824d718 100644
--- a/tests/qos/qos_sai_base.py
+++ b/tests/qos/qos_sai_base.py
@@ -16,6 +16,7 @@ class QosSaiBase:
QosSaiBase contains collection of pytest fixtures that ready the tesbed for QoS SAI test cases.
"""
SUPPORTED_T0_TOPOS = ["t0", "t0-64", "t0-116"]
+ SUPPORTED_T1_TOPOS = {"t1-lag", "t1-64-lag"}
SUPPORTED_PTF_TOPOS = ['ptf32', 'ptf64']
SUPPORTED_ASIC_LIST = ["td2", "th", "th2", "spc1", "spc2", "spc3"]
TARGET_QUEUE_WRED = 3
@@ -24,89 +25,89 @@ class QosSaiBase:
buffer_model_initialized = False
buffer_model = None
- def isBufferInApplDb(self, duthost):
+ def isBufferInApplDb(self, dut_asic):
if not self.buffer_model_initialized:
- self.buffer_model = duthost.shell('redis-cli -n 4 hget "DEVICE_METADATA|localhost" buffer_model')["stdout"]
+ self.buffer_model = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "4", "hget",
+ "DEVICE_METADATA|localhost", "buffer_model"
+ ]
+ )
+
self.buffer_model_initialized = True
- logger.info("Buffer model is {}, buffer tables will be fetched from {}".
- format(self.buffer_model or "not defined", "APPL_DB" if self.buffer_model else "CONFIG_DB"))
+ logger.info(
+ "Buffer model is {}, buffer tables will be fetched from {}".
+ format(
+ self.buffer_model or "not defined",
+ "APPL_DB" if self.buffer_model else "CONFIG_DB"
+ )
+ )
return self.buffer_model
- def __runRedisCommandOrAssert(self, duthost, argv=[]):
- """
- Runs Redis command on DUT host.
-
- The method asserts if the command fails.
-
- Args:
- duthost (AnsibleHost): Device Under Test (DUT)
- argv (list): List of commands to run on duthost
-
- Returns:
- stdout (list): List of stdout lines spewed by the invoked command
- """
- result = duthost.shell(argv=argv)
- pytest_assert(result["rc"] == 0,
- "Failed to run Redis command '{0}' with error '{1}'".format(" ".join(map(str, argv)), result["stderr"]))
-
- return result["stdout_lines"]
-
- def __computeBufferThreshold(self, duthost, bufferProfile):
+ def __computeBufferThreshold(self, dut_asic, bufferProfile):
"""
Computes buffer threshold for dynamic threshold profiles
Args:
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device ASIC Under Test (DUT)
bufferProfile (dict, inout): Map of puffer profile attributes
Returns:
Updates bufferProfile with computed buffer threshold
"""
- db = "0" if self.isBufferInApplDb(duthost) else "4"
+ db = "0" if self.isBufferInApplDb(dut_asic) else "4"
pool = bufferProfile["pool"].encode("utf-8").translate(None, "[]")
- bufferSize = int(self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", db, "HGET", pool, "size"]
- )[0])
+ bufferSize = int(
+ dut_asic.run_redis_cmd(
+ argv = ["redis-cli", "-n", db, "HGET", pool, "size"]
+ )[0]
+ )
bufferScale = 2**float(bufferProfile["dynamic_th"])
bufferScale /= (bufferScale + 1)
- bufferProfile.update({"static_th": int(bufferProfile["size"]) + int(bufferScale * bufferSize)})
+ bufferProfile.update(
+ {"static_th": int(bufferProfile["size"]) + int(bufferScale * bufferSize)}
+ )
- def __updateVoidRoidParams(self, duthost, bufferProfile):
+ def __updateVoidRoidParams(self, dut_asic, bufferProfile):
"""
Updates buffer profile with VOID/ROID params
Args:
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device Under Test (DUT)
bufferProfile (dict, inout): Map of puffer profile attributes
Returns:
Updates bufferProfile with VOID/ROID obtained from Redis db
"""
- if self.isBufferInApplDb(duthost):
- bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(None, "[]").replace("BUFFER_POOL_TABLE:",'')
+ if self.isBufferInApplDb(dut_asic):
+ bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(
+ None, "[]").replace("BUFFER_POOL_TABLE:",''
+ )
else:
- bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(None, "[]").replace("BUFFER_POOL|",'')
+ bufferPoolName = bufferProfile["pool"].encode("utf-8").translate(
+ None, "[]").replace("BUFFER_POOL|",''
+ )
- bufferPoolVoid = self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", "2", "HGET", "COUNTERS_BUFFER_POOL_NAME_MAP", bufferPoolName]
+ bufferPoolVoid = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "2", "HGET",
+ "COUNTERS_BUFFER_POOL_NAME_MAP", bufferPoolName
+ ]
)[0].encode("utf-8")
bufferProfile.update({"bufferPoolVoid": bufferPoolVoid})
- bufferPoolRoid = self.__runRedisCommandOrAssert(
- duthost,
+ bufferPoolRoid = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "1", "HGET", "VIDTORID", bufferPoolVoid]
)[0].encode("utf-8").replace("oid:",'')
bufferProfile.update({"bufferPoolRoid": bufferPoolRoid})
- def __getBufferProfile(self, request, duthost, table, port, priorityGroup):
+ def __getBufferProfile(self, request, dut_asic, table, port, priorityGroup):
"""
Get buffer profile attribute from Redis db
Args:
request (Fixture): pytest request object
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic(SonicAsic): Device Under Test (DUT)
table (str): Redis table name
port (str): DUT port alias
priorityGroup (str): QoS priority group
@@ -114,20 +115,18 @@ def __getBufferProfile(self, request, duthost, table, port, priorityGroup):
Returns:
bufferProfile (dict): Map of buffer profile attributes
"""
-
- if self.isBufferInApplDb(duthost):
+
+ if self.isBufferInApplDb(dut_asic):
db = "0"
keystr = "{0}:{1}:{2}".format(table, port, priorityGroup)
else:
db = "4"
keystr = "{0}|{1}|{2}".format(table, port, priorityGroup)
- bufferProfileName = self.__runRedisCommandOrAssert(
- duthost,
+ bufferProfileName = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGET", keystr, "profile"]
)[0].encode("utf-8").translate(None, "[]")
- result = self.__runRedisCommandOrAssert(
- duthost,
+ result = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", db, "HGETALL", bufferProfileName]
)
it = iter(result)
@@ -136,55 +135,63 @@ def __getBufferProfile(self, request, duthost, table, port, priorityGroup):
# Update profile static threshold value if profile threshold is dynamic
if "dynamic_th" in bufferProfile.keys():
- self.__computeBufferThreshold(duthost, bufferProfile)
+ self.__computeBufferThreshold(dut_asic, bufferProfile)
if "pg_lossless" in bufferProfileName:
- pytest_assert("xon" in bufferProfile.keys() and "xoff" in bufferProfile.keys(),
- "Could not find xon and/or xoff values for profile '{0}'".format(bufferProfileName))
+ pytest_assert(
+ "xon" in bufferProfile.keys() and "xoff" in bufferProfile.keys(),
+ "Could not find xon and/or xoff values for profile '{0}'".format(
+ bufferProfileName
+ )
+ )
- self.__updateVoidRoidParams(duthost, bufferProfile)
+ self.__updateVoidRoidParams(dut_asic, bufferProfile)
return bufferProfile
- def __getSharedHeadroomPoolSize(self, request, duthost):
+ def __getSharedHeadroomPoolSize(self, request, dut_asic):
"""
Get shared headroom pool size from Redis db
Args:
request (Fixture): pytest request object
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device Under Test (DUT)
Returns:
size (str) size of shared headroom pool
None if shared headroom pool isn't enabled
"""
- result = self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", "4", "HGETALL", "BUFFER_POOL|ingress_lossless_pool"]
+ result = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "4", "HGETALL",
+ "BUFFER_POOL|ingress_lossless_pool"
+ ]
)
it = iter(result)
ingressLosslessPool = dict(zip(it, it))
return ingressLosslessPool.get("xoff")
- def __getEcnWredParam(self, duthost, table, port):
+ def __getEcnWredParam(self, dut_asic, table, port):
"""
Get ECN/WRED parameters from Redis db
Args:
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device Under Test (DUT)
table (str): Redis table name
port (str): DUT port alias
Returns:
wredProfile (dict): Map of ECN/WRED attributes
"""
- wredProfileName = self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", "4", "HGET", "{0}|{1}|{2}".format(table, port, self.TARGET_QUEUE_WRED), "wred_profile"]
+ wredProfileName = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "4", "HGET",
+ "{0}|{1}|{2}".format(table, port, self.TARGET_QUEUE_WRED),
+ "wred_profile"
+ ]
)[0].encode("utf-8").translate(None, "[]")
- result = self.__runRedisCommandOrAssert(
- duthost,
+ result = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "4", "HGETALL", wredProfileName]
)
it = iter(result)
@@ -192,42 +199,45 @@ def __getEcnWredParam(self, duthost, table, port):
return wredProfile
- def __getWatermarkStatus(self, duthost):
+ def __getWatermarkStatus(self, dut_asic):
"""
Get watermark status from Redis db
Args:
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device Under Test (DUT)
Returns:
watermarkStatus (str): Watermark status
"""
- watermarkStatus = self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", "4", "HGET", "FLEX_COUNTER_TABLE|QUEUE_WATERMARK", "FLEX_COUNTER_STATUS"]
+ watermarkStatus = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "4", "HGET",
+ "FLEX_COUNTER_TABLE|QUEUE_WATERMARK", "FLEX_COUNTER_STATUS"
+ ]
)[0].encode("utf-8")
return watermarkStatus
- def __getSchedulerParam(self, duthost, port, queue):
+ def __getSchedulerParam(self, dut_asic, port, queue):
"""
Get scheduler parameters from Redis db
Args:
- duthost (AnsibleHost): Device Under Test (DUT)
+ dut_asic (SonicAsic): Device Under Test (DUT)
port (str): DUT port alias
queue (str): QoS queue
Returns:
SchedulerParam (dict): Map of scheduler parameters
"""
- schedProfile = self.__runRedisCommandOrAssert(
- duthost,
- argv = ["redis-cli", "-n", "4", "HGET", "QUEUE|{0}|{1}".format(port, queue), "scheduler"]
+ schedProfile = dut_asic.run_redis_cmd(
+ argv = [
+ "redis-cli", "-n", "4", "HGET",
+ "QUEUE|{0}|{1}".format(port, queue), "scheduler"
+ ]
)[0].encode("utf-8").translate(None, "[]")
- schedWeight = self.__runRedisCommandOrAssert(
- duthost,
+ schedWeight = dut_asic.run_redis_cmd(
argv = ["redis-cli", "-n", "4", "HGET", schedProfile, "weight"]
)[0].encode("utf-8")
@@ -277,11 +287,30 @@ def __buildTestPorts(self, request, testPortIds, testPortIps):
dstPorts = request.config.getoption("--qos_dst_ports")
srcPorts = request.config.getoption("--qos_src_ports")
- pytest_assert(len(set(dstPorts).intersection(set(srcPorts))) == 0,
- "Duplicate destination and source ports '{0}'".format(set(dstPorts).intersection(set(srcPorts))))
+ if dstPorts is None:
+ if len(testPortIds) >= 4:
+ dstPorts = [0, 2, 3]
+ elif len(testPortIds) == 3:
+ dstPorts = [0, 2, 2]
+ else:
+ dstPorts = [0, 0, 0]
+
+ if srcPorts is None:
+ srcPorts = [1]
+
+ pytest_assert(len(testPortIds) >= 2, "Provide at least 2 test ports")
+ logging.debug(
+ "Test Port IDs:{} IPs:{}".format(testPortIds, testPortIps)
+ )
+ logging.debug("Test Port dst:{}, src:{}".format(dstPorts, srcPorts))
+
+ pytest_assert(
+ len(set(dstPorts).intersection(set(srcPorts))) == 0,
+ "Duplicate destination and source ports '{0}'".format(
+ set(dstPorts).intersection(set(srcPorts))
+ )
+ )
- pytest_assert(len(dstPorts) == 3 and len(srcPorts) == 1,
- "Invalid number of ports provided, qos_dst_ports:{0}, qos_src_ports:{1}".format(len(dstPorts), len(srcPorts)))
#TODO: Randomize port selection
return {
@@ -336,6 +365,7 @@ def runPtfTest(self, ptfhost, testCase='', testParams={}):
chdir = "/root",
)["rc"] == 0, "Failed when running test '{0}'".format(testCase))
+
@pytest.fixture(scope='class')
def swapSyncd(self, request, duthosts, rand_one_dut_hostname, creds):
"""
@@ -360,7 +390,10 @@ def swapSyncd(self, request, duthosts, rand_one_dut_hostname, creds):
docker.restore_default_syncd(duthost, creds)
@pytest.fixture(scope='class', autouse=True)
- def dutConfig(self, request, duthosts, rand_one_dut_hostname, tbinfo):
+ def dutConfig(
+ self, request, duthosts, rand_one_dut_hostname, tbinfo,
+ enum_frontend_asic_index
+ ):
"""
Build DUT host config pertaining to QoS SAI tests
@@ -369,46 +402,92 @@ def dutConfig(self, request, duthosts, rand_one_dut_hostname, tbinfo):
duthost (AnsibleHost): Device Under Test (DUT)
Returns:
- dutConfig (dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs, and
- test ports
+ dutConfig (dict): Map of DUT config containing dut interfaces,
+ test port IDs, test port IPs, and test ports
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
dutLagInterfaces = []
- mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
-
- for _, lag in mgFacts["minigraph_portchannels"].items():
- for intf in lag["members"]:
- dutLagInterfaces.append(mgFacts["minigraph_ptf_indices"][intf])
+ dutPortIps = {}
+ testPortIps = {}
- testPortIds = set(mgFacts["minigraph_ptf_indices"][port] for port in mgFacts["minigraph_ports"].keys())
- testPortIds -= set(dutLagInterfaces)
- if isMellanoxDevice(duthost):
- # The last port is used for up link from DUT switch
- testPortIds -= {len(mgFacts["minigraph_ptf_indices"]) - 1}
- testPortIds = sorted(testPortIds)
+ mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
+ topo = tbinfo["topo"]["name"]
- # get current DUT port IPs
- dutPortIps = {}
- for portConfig in mgFacts["minigraph_interfaces"]:
- if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4:
- portIndex = mgFacts["minigraph_ptf_indices"][portConfig["attachto"]]
- if portIndex in testPortIds:
- dutPortIps.update({portIndex: portConfig["peer_addr"]})
+ testPortIds = []
+ # LAG ports in T1 TOPO need to be removed in Mellanox devices
+ if topo in self.SUPPORTED_T0_TOPOS or isMellanoxDevice(duthost):
+ pytest_assert(
+ not duthost.sonichost.is_multi_asic, "Fixture not supported on T0 multi ASIC"
+ )
+ for _, lag in mgFacts["minigraph_portchannels"].items():
+ for intf in lag["members"]:
+ dutLagInterfaces.append(mgFacts["minigraph_ptf_indices"][intf])
+
+ testPortIds = set(mgFacts["minigraph_ptf_indices"][port]
+ for port in mgFacts["minigraph_ports"].keys())
+ testPortIds -= set(dutLagInterfaces)
+ if isMellanoxDevice(duthost):
+ # The last port is used for up link from DUT switch
+ testPortIds -= {len(mgFacts["minigraph_ptf_indices"]) - 1}
+ testPortIds = sorted(testPortIds)
+
+ # get current DUT port IPs
+ dutPortIps = {}
+ for portConfig in mgFacts["minigraph_interfaces"]:
+ if ipaddress.ip_interface(portConfig['peer_addr']).ip.version == 4:
+ portIndex = mgFacts["minigraph_ptf_indices"][portConfig["attachto"]]
+ if portIndex in testPortIds:
+ dutPortIps.update({portIndex: portConfig["peer_addr"]})
+
+ testPortIps = self.__assignTestPortIps(mgFacts)
+
+ elif topo in self.SUPPORTED_T1_TOPOS:
+ for iface,addr in dut_asic.get_active_ip_interfaces().items():
+ if iface.startswith("Ethernet"):
+ portIndex = mgFacts["minigraph_ptf_indices"][iface]
+ dutPortIps.update({portIndex: addr["peer_ipv4"]})
+ elif iface.startswith("PortChannel"):
+ portName = next(
+ iter(mgFacts["minigraph_portchannels"][iface]["members"])
+ )
+ portIndex = mgFacts["minigraph_ptf_indices"][portName]
+ dutPortIps.update({portIndex: addr["peer_ipv4"]})
+
+ testPortIds = sorted(dutPortIps.keys())
+ else:
+ raise Exception("Unsupported testbed type - {}".format(topo))
- testPortIps = self.__assignTestPortIps(mgFacts)
# restore currently assigned IPs
testPortIps.update(dutPortIps)
testPorts = self.__buildTestPorts(request, testPortIds, testPortIps)
yield {
- "dutInterfaces" : {index: port for port, index in mgFacts["minigraph_ptf_indices"].items()},
+ "dutInterfaces" : {
+ index: port for port, index in mgFacts["minigraph_ptf_indices"].items()
+ },
"testPortIds": testPortIds,
"testPortIps": testPortIps,
"testPorts": testPorts,
}
@pytest.fixture(scope='class')
- def updateIptables(self, duthosts, rand_one_dut_hostname, swapSyncd):
+ def ssh_tunnel_to_syncd_rpc(
+ self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index,
+ swapSyncd
+ ):
+ duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
+ dut_asic.create_ssh_tunnel_sai_rpc()
+
+ yield
+
+ dut_asic.remove_ssh_tunnel_sai_rpc()
+
+ @pytest.fixture(scope='class')
+ def updateIptables(
+ self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index, swapSyncd
+ ):
"""
Update iptables on DUT host with drop rule for BGP SYNC packets
@@ -420,34 +499,25 @@ def updateIptables(self, duthosts, rand_one_dut_hostname, swapSyncd):
None
"""
duthost = duthosts[rand_one_dut_hostname]
- def updateIptablesDropRule(duthost, ipVersion, state='present'):
- duthost.iptables(
- ip_version=ipVersion,
- action="insert",
- rule_num="1",
- chain="INPUT",
- jump="DROP",
- protocol="tcp",
- destination_port="bgp",
- state=state
- )
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
-
- ipVersions = [{"ipVersion": "ipv4"}, {"ipVersion": "ipv6"}]
+ ipVersions = [{"ip_version": "ipv4"}, {"ip_version": "ipv6"}]
logger.info("Add ip[6]tables rule to drop BGP SYN Packet from peer so that we do not ACK back")
for ipVersion in ipVersions:
- updateIptablesDropRule(duthost, state="present", **ipVersion)
+ dut_asic.bgp_drop_rule(state="present", **ipVersion)
yield
logger.info("Remove ip[6]tables rule to drop BGP SYN Packet from Peer")
for ipVersion in ipVersions:
- updateIptablesDropRule(duthost, state="absent", **ipVersion)
+ dut_asic.bgp_drop_rule(state="absent", **ipVersion)
@pytest.fixture(scope='class')
- def stopServices(self, duthosts, rand_one_dut_hostname, swapSyncd, \
- enable_container_autorestart, disable_container_autorestart):
+ def stopServices(
+ self, duthosts, rand_one_dut_hostname, enum_frontend_asic_index,
+ swapSyncd, enable_container_autorestart, disable_container_autorestart
+ ):
"""
Stop services (lldp-syncs, lldpd, bgpd) on DUT host prior to test start
@@ -459,6 +529,7 @@ def stopServices(self, duthosts, rand_one_dut_hostname, swapSyncd, \
None
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
def updateDockerService(host, docker="", action="", service=""):
"""
Helper function to update docker services
@@ -479,24 +550,23 @@ def updateDockerService(host, docker="", action="", service=""):
service=service
)
)
+ logger.info("{}ed {}".format(action, service))
services = [
- {"docker": "lldp", "service": "lldp-syncd"},
- {"docker": "lldp", "service": "lldpd"},
- {"docker": "bgp", "service": "bgpd"},
- {"docker": "bgp", "service": "bgpmon"}
+ {"docker": dut_asic.get_docker_name("lldp"), "service": "lldp-syncd"},
+ {"docker": dut_asic.get_docker_name("lldp"), "service": "lldpd"},
+ {"docker": dut_asic.get_docker_name("bgp"), "service": "bgpd"},
+ {"docker": dut_asic.get_docker_name("bgp"), "service": "bgpmon"},
]
feature_list = ['lldp', 'bgp', 'syncd', 'swss']
disable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list)
- logger.info("Stop lldp, lldp-syncd, and bgpd services")
for service in services:
updateDockerService(duthost, action="stop", **service)
yield
enable_container_autorestart(duthost, testcase="test_qos_sai", feature_list=feature_list)
- logger.info("Start lldp, lldp-syncd, and bgpd services")
for service in services:
updateDockerService(duthost, action="start", **service)
@@ -539,7 +609,9 @@ def updateLoganalyzerExceptions(self, rand_one_dut_hostname, loganalyzer):
yield
@pytest.fixture(scope='class', autouse=True)
- def disablePacketAging(self, duthosts, rand_one_dut_hostname, stopServices):
+ def disablePacketAging(
+ self, duthosts, rand_one_dut_hostname, stopServices
+ ):
"""
disable packet aging on DUT host
@@ -551,6 +623,7 @@ def disablePacketAging(self, duthosts, rand_one_dut_hostname, stopServices):
None
"""
duthost = duthosts[rand_one_dut_hostname]
+
if isMellanoxDevice(duthost):
logger.info("Disable Mellanox packet aging")
duthost.copy(src="qos/files/mellanox/packets_aging.py", dest="/tmp")
@@ -565,7 +638,12 @@ def disablePacketAging(self, duthosts, rand_one_dut_hostname, stopServices):
duthost.command("docker exec syncd rm -rf /packets_aging.py")
@pytest.fixture(scope='class', autouse=True)
- def dutQosConfig(self, duthosts, rand_one_dut_hostname, dutConfig, ingressLosslessProfile, ingressLossyProfile, egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize, tbinfo):
+ def dutQosConfig(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
+ dutConfig, ingressLosslessProfile, ingressLossyProfile,
+ egressLosslessProfile, egressLossyProfile, sharedHeadroomPoolSize,
+ tbinfo
+ ):
"""
Prepares DUT host QoS configuration
@@ -578,11 +656,12 @@ def dutQosConfig(self, duthosts, rand_one_dut_hostname, dutConfig, ingressLossle
QoSConfig (dict): Map containing DUT host QoS configuration
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
pytest_assert("minigraph_hwsku" in mgFacts, "Could not find DUT SKU")
profileName = ingressLosslessProfile["profileName"]
- if self.isBufferInApplDb(duthost):
+ if self.isBufferInApplDb(dut_asic):
profile_pattern = "^BUFFER_PROFILE_TABLE\:pg_lossless_(.*)_profile$"
else:
profile_pattern = "^BUFFER_PROFILE\|pg_lossless_(.*)_profile"
@@ -659,7 +738,10 @@ def dutTestParams(self, duthosts, rand_one_dut_hostname, tbinfo, ptf_portmap_fil
}
@pytest.fixture(scope='class')
- def releaseAllPorts(self, ptfhost, dutTestParams, updateIptables):
+ def releaseAllPorts(
+ self, duthosts, rand_one_dut_hostname, ptfhost, dutTestParams,
+ updateIptables, ssh_tunnel_to_syncd_rpc
+ ):
"""
Release all paused ports prior to running QoS SAI test cases
@@ -674,10 +756,16 @@ def releaseAllPorts(self, ptfhost, dutTestParams, updateIptables):
Raises:
RunAnsibleModuleFail if ptf test fails
"""
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.ReleaseAllPorts", testParams=dutTestParams["basicParams"])
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.ReleaseAllPorts",
+ testParams=dutTestParams["basicParams"]
+ )
@pytest.fixture(scope='class', autouse=True)
- def populateArpEntries(self, duthosts, rand_one_dut_hostname, ptfhost, dutTestParams, dutConfig, releaseAllPorts):
+ def populateArpEntries(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
+ ptfhost, dutTestParams, dutConfig, releaseAllPorts,
+ ):
"""
Update ARP entries of QoS SAI test ports
@@ -696,13 +784,14 @@ def populateArpEntries(self, duthosts, rand_one_dut_hostname, ptfhost, dutTestPa
RunAnsibleModuleFail if ptf test fails
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
saiQosTest = None
if dutTestParams["topo"] in self.SUPPORTED_T0_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulate"
elif dutTestParams["topo"] in self.SUPPORTED_PTF_TOPOS:
saiQosTest = "sai_qos_tests.ARPpopulatePTF"
else:
- result = duthost.command(argv = ["arp", "-n"])
+ result = dut_asic.command("arp -n")
pytest_assert(result["rc"] == 0, "failed to run arp command on {0}".format(duthost.hostname))
if result["stdout"].find("incomplete") == -1:
saiQosTest = "sai_qos_tests.ARPpopulate"
@@ -710,10 +799,15 @@ def populateArpEntries(self, duthosts, rand_one_dut_hostname, ptfhost, dutTestPa
if saiQosTest:
testParams = dutTestParams["basicParams"]
testParams.update(dutConfig["testPorts"])
- self.runPtfTest(ptfhost, testCase=saiQosTest, testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase=saiQosTest, testParams=testParams
+ )
@pytest.fixture(scope='class', autouse=True)
- def sharedHeadroomPoolSize(self, request, duthosts, rand_one_dut_hostname):
+ def sharedHeadroomPoolSize(
+ self, request, duthosts, enum_frontend_asic_index,
+ rand_one_dut_hostname
+ ):
"""
Retreives shared headroom pool size
@@ -728,11 +822,14 @@ def sharedHeadroomPoolSize(self, request, duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSharedHeadroomPoolSize(
request,
- duthost
+ duthost.asic_instance(enum_frontend_asic_index)
)
@pytest.fixture(scope='class', autouse=True)
- def ingressLosslessProfile(self, request, duthosts, rand_one_dut_hostname, dutConfig):
+ def ingressLosslessProfile(
+ self, request, duthosts, enum_frontend_asic_index,
+ rand_one_dut_hostname, dutConfig
+ ):
"""
Retreives ingress lossless profile
@@ -746,16 +843,20 @@ def ingressLosslessProfile(self, request, duthosts, rand_one_dut_hostname, dutCo
ingressLosslessProfile (dict): Map of ingress lossless buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
- duthost,
- "BUFFER_PG_TABLE" if self.isBufferInApplDb(duthost) else "BUFFER_PG",
+ dut_asic,
+ "BUFFER_PG_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_PG",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"3-4"
)
@pytest.fixture(scope='class', autouse=True)
- def ingressLossyProfile(self, request, duthosts, rand_one_dut_hostname, dutConfig):
+ def ingressLossyProfile(
+ self, request, duthosts, enum_frontend_asic_index,
+ rand_one_dut_hostname, dutConfig
+ ):
"""
Retreives ingress lossy profile
@@ -769,16 +870,20 @@ def ingressLossyProfile(self, request, duthosts, rand_one_dut_hostname, dutConfi
ingressLossyProfile (dict): Map of ingress lossy buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
- duthost,
- "BUFFER_PG_TABLE" if self.isBufferInApplDb(duthost) else "BUFFER_PG",
+ dut_asic,
+ "BUFFER_PG_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_PG",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"0"
)
@pytest.fixture(scope='class', autouse=True)
- def egressLosslessProfile(self, request, duthosts, rand_one_dut_hostname, dutConfig):
+ def egressLosslessProfile(
+ self, request, duthosts, enum_frontend_asic_index,
+ rand_one_dut_hostname, dutConfig
+ ):
"""
Retreives egress lossless profile
@@ -792,79 +897,93 @@ def egressLosslessProfile(self, request, duthosts, rand_one_dut_hostname, dutCon
egressLosslessProfile (dict): Map of egress lossless buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
- duthost,
- "BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(duthost) else "BUFFER_QUEUE",
+ dut_asic,
+ "BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_QUEUE",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"3-4"
)
@pytest.fixture(scope='class', autouse=True)
- def egressLossyProfile(self, request, duthosts, rand_one_dut_hostname, dutConfig):
+ def egressLossyProfile(
+ self, request, duthosts, enum_frontend_asic_index,
+ rand_one_dut_hostname, dutConfig
+ ):
"""
Retreives egress lossy profile
Args:
request (Fixture): pytest request object
duthost (AnsibleHost): Device Under Test (DUT)
- dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
- and test ports
+ dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
+ test port IDs, test port IPs, and test ports
Returns:
egressLossyProfile (dict): Map of egress lossy buffer profile attributes
"""
duthost = duthosts[rand_one_dut_hostname]
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
yield self.__getBufferProfile(
request,
- duthost,
- "BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(duthost) else "BUFFER_QUEUE",
+ dut_asic,
+ "BUFFER_QUEUE_TABLE" if self.isBufferInApplDb(dut_asic) else "BUFFER_QUEUE",
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
"0-2"
)
@pytest.fixture(scope='class')
- def losslessSchedProfile(self, duthosts, rand_one_dut_hostname, dutConfig):
+ def losslessSchedProfile(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
+ dutConfig
+ ):
"""
Retreives lossless scheduler profile
Args:
duthost (AnsibleHost): Device Under Test (DUT)
- dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
- and test ports
+ dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
+ test port IDs, test port IPs, and test ports
Returns:
losslessSchedProfile (dict): Map of scheduler parameters
"""
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSchedulerParam(
- duthost,
+ duthost.asic_instance(enum_frontend_asic_index),
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
self.TARGET_LOSSLESS_QUEUE_SCHED
)
@pytest.fixture(scope='class')
- def lossySchedProfile(self, duthosts, rand_one_dut_hostname, dutConfig):
+ def lossySchedProfile(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
+ dutConfig
+ ):
"""
Retreives lossy scheduler profile
Args:
duthost (AnsibleHost): Device Under Test (DUT)
- dutConfig (Fixture, dict): Map of DUT config containing dut interfaces, test port IDs, test port IPs,
- and test ports
+ dutConfig (Fixture, dict): Map of DUT config containing dut interfaces,
+ test port IDs, test port IPs, and test ports
Returns:
lossySchedProfile (dict): Map of scheduler parameters
"""
duthost = duthosts[rand_one_dut_hostname]
yield self.__getSchedulerParam(
- duthost,
+ duthost.asic_instance(enum_frontend_asic_index),
dutConfig["dutInterfaces"][dutConfig["testPorts"]["src_port_id"]],
self.TARGET_LOSSY_QUEUE_SCHED
)
@pytest.fixture
- def updateSchedProfile(self, duthosts, rand_one_dut_hostname, dutQosConfig, losslessSchedProfile, lossySchedProfile):
+ def updateSchedProfile(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname,
+ dutQosConfig, losslessSchedProfile, lossySchedProfile
+ ):
"""
Updates lossless/lossy scheduler profiles
@@ -888,8 +1007,7 @@ def updateRedisSchedParam(schedParam):
Returns:
None
"""
- self.__runRedisCommandOrAssert(
- duthost,
+ duthost.asic_instance(enum_frontend_asic_index).run_redis_cmd(
argv = [
"redis-cli",
"-n",
@@ -902,8 +1020,14 @@ def updateRedisSchedParam(schedParam):
)
wrrSchedParams = [
- {"profile": lossySchedProfile["schedProfile"], "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossy_weight"]},
- {"profile": losslessSchedProfile["schedProfile"], "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossless_weight"]},
+ {
+ "profile": lossySchedProfile["schedProfile"],
+ "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossy_weight"]
+ },
+ {
+ "profile": losslessSchedProfile["schedProfile"],
+ "qosConfig": dutQosConfig["param"]["wrr_chg"]["lossless_weight"]
+ },
]
for schedParam in wrrSchedParams:
@@ -912,15 +1036,23 @@ def updateRedisSchedParam(schedParam):
yield
schedProfileParams = [
- {"profile": lossySchedProfile["schedProfile"], "qosConfig": lossySchedProfile["schedWeight"]},
- {"profile": losslessSchedProfile["schedProfile"], "qosConfig": losslessSchedProfile["schedWeight"]},
+ {
+ "profile": lossySchedProfile["schedProfile"],
+ "qosConfig": lossySchedProfile["schedWeight"]
+ },
+ {
+ "profile": losslessSchedProfile["schedProfile"],
+ "qosConfig": losslessSchedProfile["schedWeight"]
+ },
]
for schedParam in schedProfileParams:
updateRedisSchedParam(schedParam)
@pytest.fixture
- def resetWatermark(self, duthosts, rand_one_dut_hostname):
+ def resetWatermark(
+ self, duthosts, enum_frontend_asic_index, rand_one_dut_hostname
+ ):
"""
Reset queue watermark
@@ -931,6 +1063,7 @@ def resetWatermark(self, duthosts, rand_one_dut_hostname):
None
"""
duthost = duthosts[rand_one_dut_hostname]
- duthost.shell("counterpoll watermark enable")
- duthost.shell("sleep 20")
- duthost.shell("counterpoll watermark disable")
+ dut_asic = duthost.asic_instance(enum_frontend_asic_index)
+ dut_asic.command("counterpoll watermark enable")
+ dut_asic.command("sleep 20")
+ dut_asic.command("counterpoll watermark disable")
diff --git a/tests/qos/test_buffer.py b/tests/qos/test_buffer.py
index 0b7be8d50ed..58772bca2da 100644
--- a/tests/qos/test_buffer.py
+++ b/tests/qos/test_buffer.py
@@ -10,6 +10,8 @@
from tests.common import config_reload
from tests.common.utilities import wait_until
from tests.common.helpers.assertions import pytest_assert
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts
+from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer
profile_format = 'pg_lossless_{}_{}_profile'
LOSSLESS_PROFILE_PATTERN = 'pg_lossless_([1-9][0-9]*000)_([1-9][0-9]*m)_profile'
@@ -17,10 +19,14 @@
DEFAULT_CABLE_LENGTH_LIST = None
DEFAULT_LOSSLESS_HEADROOM_DATA = None
DEFAULT_INGRESS_POOL_NUMBER = 0
+DEFAULT_SHARED_HEADROOM_POOL_ENABLED = False
+DEFAULT_OVER_SUBSCRIBE_RATIO = None
+DEFAULT_SHARED_HEADROOM_POOL_SIZE = None
DEFAULT_MTU = None
TESTPARAM_HEADROOM_OVERRIDE = None
TESTPARAM_LOSSLESS_PG = None
+TESTPARAM_SHARED_HEADROOM_POOL = None
BUFFER_MODEL_DYNAMIC = True
@@ -46,6 +52,27 @@ def detect_ingress_pool_number(duthost):
DEFAULT_INGRESS_POOL_NUMBER = len(pools.split())
+def detect_shared_headroom_pool_mode(duthost):
+ """Detect whether shared headroom pool is enabled
+
+ Args:
+ duthost: The DUT host object
+ """
+ global DEFAULT_SHARED_HEADROOM_POOL_ENABLED
+ global DEFAULT_SHARED_HEADROOM_POOL_SIZE
+ global DEFAULT_OVER_SUBSCRIBE_RATIO
+
+ over_subscribe_ratio = duthost.shell('redis-cli -n 4 hget "DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE" over_subscribe_ratio')['stdout']
+ if over_subscribe_ratio and over_subscribe_ratio != '0':
+ DEFAULT_SHARED_HEADROOM_POOL_ENABLED = True
+ DEFAULT_OVER_SUBSCRIBE_RATIO = int(over_subscribe_ratio)
+
+ shared_headroom_pool_size = duthost.shell('redis-cli -n 4 hget "BUFFER_POOL|ingress_lossless_pool" xoff')['stdout']
+ if shared_headroom_pool_size and shared_headroom_pool_size != '0':
+ DEFAULT_SHARED_HEADROOM_POOL_ENABLED = True
+ DEFAULT_SHARED_HEADROOM_POOL_SIZE = int(shared_headroom_pool_size)
+
+
def detect_default_mtu(duthost, port_to_test):
"""Detect the mtu and store it for futher use. Called only once when the module is initialized
@@ -95,6 +122,7 @@ def load_test_parameters(duthost):
global DEFAULT_CABLE_LENGTH_LIST
global TESTPARAM_HEADROOM_OVERRIDE
global TESTPARAM_LOSSLESS_PG
+ global TESTPARAM_SHARED_HEADROOM_POOL
param_file_name = "qos/files/dynamic_buffer_param.json"
with open(param_file_name) as file:
@@ -105,6 +133,7 @@ def load_test_parameters(duthost):
DEFAULT_CABLE_LENGTH_LIST = vendor_specific_param['default_cable_length']
TESTPARAM_HEADROOM_OVERRIDE = vendor_specific_param['headroom-override']
TESTPARAM_LOSSLESS_PG = vendor_specific_param['lossless_pg']
+ TESTPARAM_SHARED_HEADROOM_POOL = vendor_specific_param['shared-headroom-pool']
@pytest.fixture(scope="module", autouse=True)
@@ -117,6 +146,7 @@ def setup_module(duthost):
detect_buffer_model(duthost)
if BUFFER_MODEL_DYNAMIC:
detect_ingress_pool_number(duthost)
+ detect_shared_headroom_pool_mode(duthost)
load_lossless_headroom_data(duthost)
load_test_parameters(duthost)
@@ -129,6 +159,22 @@ def setup_module(duthost):
yield
+def init_log_analyzer(duthost, marker, expected):
+ loganalyzer = LogAnalyzer(ansible_host=duthost, marker_prefix=marker)
+ marker = loganalyzer.init()
+
+ loganalyzer.load_common_config()
+ loganalyzer.expect_regex = []
+ loganalyzer.expect_regex.extend(expected)
+
+ return loganalyzer, marker
+
+
+def check_log_analyzer(loganalyzer, marker):
+ loganalyzer.analyze(marker)
+ return loganalyzer
+
+
def check_pool_size(duthost, ingress_lossless_pool_oid, **kwargs):
"""Check whether the pool size has been updated correctedly
@@ -139,77 +185,161 @@ def check_pool_size(duthost, ingress_lossless_pool_oid, **kwargs):
Args:
ingress_lossless_pool_oid: The SAI OID of the ingress lossless pool in ASIC_DB
kwargs: The parameters based on which the expected pool size is calculated.
- They are represeted in form of kwargs because different vendor can require different parameters
+ They are represented in form of kwargs because different vendor can require different parameters
For Mellanox, it includes:
- - old / new pg size
- - old / new pg numbers
- - current pool size
- - the expected pool size is calculated as:
- current_pool_size + old_pg_num * old_pg_size - new_pg_num * new_pg_size
+ - Old / new pg size
+ - Old / new pg xoff (required only over subscribe ratio is defined)
+ - Old / new pg numbers
+ - Old_ratio / new_ratio (required only over subscribe ratio is defined)
+ - Current pool size
+ - Current shared headroom pool size (required only over subscribe ratio is defined)
+ - The expected pool size is calculated as following:
+ - Shared headroom pool disabled:
+ current_pool_size + old_pg_num * old_pg_size - new_pg_num * new_pg_size
+ - Shared headroom pool enabled by over subscribe ratio:
+ current_pool_size + old_pg_num * old_pg_size - new_pg_num * new_pg_size
+ + (old_pg_num * old_pg_xoff - new_pg_num * new_pg_xoff) * over_subscribe_ratio
"""
- if duthost.facts['asic_type'] == 'mellanox':
- old_headroom = int(kwargs["old_headroom"])
+ logging.debug("Kwargs {}".format(kwargs))
- if "old_pg_number" in kwargs:
- old_pg_number = int(kwargs["old_pg_number"])
+ if duthost.facts['asic_type'] == 'mellanox':
+ if kwargs.get("old_ratio") and kwargs.get("new_ratio"):
+ curr_pool_size = int(kwargs["pool_size"])
+ curr_shp_size = int(kwargs["shp_size"])
+ old_ratio = int(kwargs.get("old_ratio"))
+ new_ratio = int(kwargs.get("new_ratio"))
+ original_memory = curr_pool_size * DEFAULT_INGRESS_POOL_NUMBER + curr_shp_size
+ if new_ratio == 0:
+ expected_shp_size = 0
+ expected_pool_size = (original_memory - curr_shp_size * old_ratio) / DEFAULT_INGRESS_POOL_NUMBER
+ else:
+ expected_shp_size = curr_shp_size * old_ratio / new_ratio
+ expected_pool_size = (original_memory - expected_shp_size) / DEFAULT_INGRESS_POOL_NUMBER
+ elif kwargs.get("config_shp_size"):
+ expected_shp_size = int(kwargs.get("config_shp_size"))
+ expected_pool_size = None
else:
- old_pg_number = 2
+ curr_pool_size = int(kwargs["pool_size"])
- if "new_pg_number" in kwargs:
- new_pg_number = int(kwargs["new_pg_number"])
- else:
- new_pg_number = old_pg_number
+ if "old_pg_number" in kwargs:
+ old_pg_number = int(kwargs["old_pg_number"])
+ else:
+ old_pg_number = 2
- if new_pg_number:
- if "new_headroom" in kwargs:
- new_headroom = int(kwargs["new_headroom"])
+ if old_pg_number:
+ old_size = int(kwargs["old_size"])
else:
- new_headroom = old_headroom
- new_reserved = new_pg_number * new_headroom
- else:
- new_reserved = 0
+ old_size = 0
- curr_pool_size = int(kwargs["pool_size"])
+ if "new_pg_number" in kwargs:
+ new_pg_number = int(kwargs["new_pg_number"])
+ else:
+ new_pg_number = old_pg_number
- original_memory = curr_pool_size * DEFAULT_INGRESS_POOL_NUMBER + old_headroom * old_pg_number
- expected_pool_size = (original_memory - new_reserved) / DEFAULT_INGRESS_POOL_NUMBER
+ if new_pg_number:
+ if "new_size" in kwargs:
+ new_size = int(kwargs["new_size"])
+ else:
+ new_size = old_size
+ new_reserved = new_pg_number * new_size
+ else:
+ new_reserved = 0
+
+ original_memory = curr_pool_size * DEFAULT_INGRESS_POOL_NUMBER + old_size * old_pg_number
+
+ old_speed = kwargs.get("old_speed")
+ new_speed = kwargs.get("new_speed")
+ if old_speed and new_speed:
+ extra_overhead_400G = 18*1024
+ if old_speed == "400000" and new_speed != "400000":
+ original_memory += extra_overhead_400G
+ elif old_speed != "400000" and new_speed == "400000":
+ original_memory -= extra_overhead_400G
+
+ if DEFAULT_OVER_SUBSCRIBE_RATIO:
+ curr_shp_size = int(kwargs["shp_size"])
+ if old_pg_number:
+ old_xoff = int(kwargs["old_xoff"])
+ else:
+ old_xoff = 0
+ if new_pg_number and "new_xoff" in kwargs:
+ new_xoff = int(kwargs["new_xoff"])
+ else:
+ new_xoff = old_xoff
+ original_memory += curr_shp_size
+ expected_shp_size = curr_shp_size + (new_xoff * new_pg_number - old_xoff * old_pg_number) / DEFAULT_OVER_SUBSCRIBE_RATIO
+ new_reserved += expected_shp_size
+ else:
+ expected_shp_size = None
+ curr_shp_size = None
+
+ expected_pool_size = (original_memory - new_reserved) / DEFAULT_INGRESS_POOL_NUMBER
+
+ logging.debug("Expected pool {}, expec shp {}, curr_shp {} default ovs {}".format(expected_pool_size, expected_shp_size, curr_shp_size, DEFAULT_OVER_SUBSCRIBE_RATIO))
def _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid):
pool_sai = _compose_dict_from_cli(duthost.shell('redis-cli -n 1 hgetall ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_POOL:{}'.format(ingress_lossless_pool_oid))['stdout'].split('\n'))
- return pool_sai['SAI_BUFFER_POOL_ATTR_SIZE']
+ if DEFAULT_SHARED_HEADROOM_POOL_ENABLED:
+ return pool_sai['SAI_BUFFER_POOL_ATTR_SIZE'], pool_sai['SAI_BUFFER_POOL_ATTR_XOFF_SIZE']
+ else:
+ return pool_sai['SAI_BUFFER_POOL_ATTR_SIZE'], None
+
+ def _check_pool_size(duthost, expected_pool_size, expected_shp_size, ingress_lossless_pool_oid):
+ pool_app =_compose_dict_from_cli(duthost.shell('redis-cli hgetall "BUFFER_POOL_TABLE:ingress_lossless_pool"')['stdout'].split('\n'))
- def _check_pool_size(duthost, expected_pool_size, ingress_lossless_pool_oid):
- pool_size = duthost.shell('redis-cli hget "BUFFER_POOL_TABLE:ingress_lossless_pool" size')['stdout']
+ if expected_pool_size and int(pool_app['size']) != expected_pool_size:
+ return False
- if int(pool_size) != expected_pool_size:
+ if DEFAULT_OVER_SUBSCRIBE_RATIO and int(pool_app['xoff']) != expected_shp_size:
return False
if ingress_lossless_pool_oid:
- pool_size = _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid)
- if int(pool_size) != expected_pool_size:
+ pool_size, shp_size = _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid)
+ if expected_pool_size and int(pool_size) != expected_pool_size:
+ return False
+
+ if expected_shp_size and expected_shp_size != int(shp_size):
return False
return True
- pytest_assert(wait_until(20, 2, _check_pool_size, duthost, expected_pool_size, ingress_lossless_pool_oid),
- "Pool size isn't correct in database: expected {}, size in APPL_DB {}, size in ASIC_DB {}".format(
+ pytest_assert(wait_until(20, 2, _check_pool_size, duthost, expected_pool_size, expected_shp_size, ingress_lossless_pool_oid),
+ "Pool size isn't correct in database: expected pool {} shp {}, size in APPL_DB pool {} shp {}, size in ASIC_DB {}".format(
+ expected_pool_size,
+ expected_shp_size,
+ duthost.shell('redis-cli hget "BUFFER_POOL_TABLE:ingress_lossless_pool" size')['stdout'],
+ duthost.shell('redis-cli hget "BUFFER_POOL_TABLE:ingress_lossless_pool" xoff')['stdout'],
+ _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid))
+ if DEFAULT_OVER_SUBSCRIBE_RATIO else
+ "Pool size isn't correct in database: expected {}, size in APPL_DB pool {}, size in ASIC_DB {}".format(
expected_pool_size,
duthost.shell('redis-cli hget "BUFFER_POOL_TABLE:ingress_lossless_pool" size')['stdout'],
- _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid)))
+ _get_pool_size_from_asic_db(duthost, ingress_lossless_pool_oid))
+ )
-def check_pg_profile(duthost, pg, expected_profile):
+def check_pg_profile(duthost, pg, expected_profile, fail_test=True):
"""Check whether the profile in BUFFER_PG match the expected value in a wait_until loop with maximum timeout as 10 seconds
Args:
pg: The key of buffer pg in BUFFER_PG table. Format: BUFFER_PG||
expected_profile: The name of the expected profile
+ fail_test: Fail the test by pytest_assert in case expected_profile not found within given time
+
+ Returns:
+ Whether the expected profile has been found within given time
"""
def _check_pg_profile(duthost, pg, expected_profile):
profile = duthost.shell('redis-cli hget {} profile'.format(pg))['stdout'][1:-1]
return (profile == 'BUFFER_PROFILE_TABLE:' + expected_profile)
- pytest_assert(wait_until(10, 2, _check_pg_profile, duthost, pg, expected_profile), "Profile in PG {} isn't {}".format(pg, expected_profile))
+ if wait_until(10, 2, _check_pg_profile, duthost, pg, expected_profile):
+ return True
+ else:
+ if fail_test:
+ pytest_assert(False, "Profile in PG {} isn't {}".format(pg, expected_profile))
+ else:
+ return False
def check_pfc_enable(duthost, port, expected_pfc_enable_map):
@@ -289,7 +419,8 @@ def check_buffer_profile_details(duthost, initial_profiles, profile_name, profil
std_profile = std_profiles_for_speed.get(cable_length)
if std_profile:
# This means it's a profile with std speed and cable length. We can check whether the headroom data is correct
- pytest_assert(profile_appldb['xon'] == std_profile['xon'] and profile_appldb['xoff'] == std_profile['xoff'] and profile_appldb['size'] == std_profile['size'],
+ pytest_assert(profile_appldb['xon'] == std_profile['xon'] and profile_appldb['xoff'] == std_profile['xoff']
+ and (profile_appldb['size'] == std_profile['size'] or DEFAULT_SHARED_HEADROOM_POOL_ENABLED),
"Generated profile {} doesn't match the std profile {}".format(profile_appldb, std_profile))
else:
for std_cable_len, std_profile in std_profiles_for_speed.items():
@@ -444,13 +575,23 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
profile = duthost.shell('redis-cli hget "BUFFER_PG_TABLE:{}:3-4" profile'.format(port_to_test))['stdout'][1:-1]
detect_default_mtu(duthost, port_to_test)
- original_headroom_size = int(duthost.shell('redis-cli hget "{}" size'.format(profile))['stdout'])
+ original_pg_size = int(duthost.shell('redis-cli hget "{}" size'.format(profile))['stdout'])
original_pool_size = int(duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool size')['stdout'])
+ if DEFAULT_OVER_SUBSCRIBE_RATIO:
+ original_pg_xoff = int(duthost.shell('redis-cli hget "{}" xoff'.format(profile))['stdout'])
+ original_shp_size = int(duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool xoff')['stdout'])
+ else:
+ original_pg_xoff = None
+ original_shp_size = None
initial_asic_db_profiles = fetch_initial_asic_db(duthost)
- if speed_to_test == original_speed and cable_len_to_test == original_cable_len and mtu_to_test == DEFAULT_MTU:
- pytest.skip('Speed, MTU and cable length matches the default value, nothing to test, skip')
+ if mtu_to_test == DEFAULT_MTU:
+ if speed_to_test == original_speed and cable_len_to_test == original_cable_len:
+ pytest.skip('Speed, MTU and cable length matches the default value, nothing to test, skip')
+ expected_profile = 'pg_lossless_{}_{}_profile'.format(speed_to_test, cable_len_to_test)
+ if duthost.shell('redis-cli hget BUFFER_PROFILE_TABLE:{}'.format(expected_profile))['stdout']:
+ pytest.skip('The buffer profile has existed, most of the checks can not be performed, skip')
try:
if not speed_to_test == original_speed:
@@ -479,12 +620,18 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
logging.info('SAI OID for newly created profile {} ingress lossless pool {}'.format(profile_oid, pool_oid))
# Check whether profile exist
- headroom_size = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" size'.format(expected_profile))['stdout'])
+ pg_size = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" size'.format(expected_profile))['stdout'])
+ pg_xoff = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" xoff'.format(expected_profile))['stdout']) if DEFAULT_OVER_SUBSCRIBE_RATIO else None
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = headroom_size)
+ shp_size = original_shp_size,
+ old_speed = original_speed,
+ new_speed = speed_to_test,
+ old_xoff = original_pg_xoff,
+ new_xoff = pg_xoff,
+ old_size = original_pg_size,
+ new_size = pg_size)
# Remove all the lossless profile on the port
logging.info('[Remove all lossless PGs] Checking pool size and pfc_enable')
@@ -493,7 +640,11 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
+ shp_size = original_shp_size,
+ old_speed = original_speed,
+ new_speed = speed_to_test,
+ old_xoff = original_pg_xoff,
+ old_size = original_pg_size,
new_pg_number = 0)
check_pfc_enable(duthost, port_to_test, '')
@@ -509,8 +660,13 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = headroom_size,
+ shp_size = original_shp_size,
+ old_speed = original_speed,
+ new_speed = speed_to_test,
+ old_xoff = original_pg_xoff,
+ new_xoff = pg_xoff,
+ old_size = original_pg_size,
+ new_size = pg_size,
new_pg_number = 1)
check_pfc_enable(duthost, port_to_test, '6')
@@ -530,12 +686,18 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
expected_profile = 'pg_lossless_{}_{}_profile'.format(speed_to_test, original_cable_len)
check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:6'.format(port_to_test), expected_profile)
- headroom_size = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" size'.format(expected_profile))['stdout'])
+ pg_size = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" size'.format(expected_profile))['stdout'])
+ pg_xoff = int(duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:{}" xoff'.format(expected_profile))['stdout']) if DEFAULT_OVER_SUBSCRIBE_RATIO else None
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = headroom_size,
+ shp_size = original_shp_size,
+ old_speed = original_speed,
+ new_speed = speed_to_test,
+ old_xoff = original_pg_xoff,
+ new_xoff = pg_xoff,
+ old_size = original_pg_size,
+ new_size = pg_size,
new_pg_number = 1)
duthost.shell('config interface buffer priority-group lossless remove {} 6'.format(port_to_test))
@@ -543,7 +705,12 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
+ shp_size = original_shp_size,
+ old_speed = original_speed,
+ new_speed = speed_to_test,
+ old_xoff = original_pg_xoff,
+ new_xoff = pg_xoff,
+ old_size = original_pg_size,
new_pg_number = 0)
check_pfc_enable(duthost, port_to_test, '')
else:
@@ -568,7 +735,9 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size)
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ old_size = original_pg_size)
logging.info('[Extra lossless PG]')
duthost.shell('config interface buffer priority-group lossless add {} 6'.format(port_to_test))
@@ -579,7 +748,9 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ old_size = original_pg_size,
new_pg_number = 3)
logging.info('[Restore config]')
@@ -590,7 +761,9 @@ def test_change_speed_cable(duthosts, rand_one_dut_hostname, conn_graph_facts, p
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size)
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ old_size = original_pg_size)
finally:
duthost.shell('config interface buffer priority-group lossless remove {}'.format(port_to_test), module_ignore_errors = True)
duthost.shell('config interface speed {} {}'.format(port_to_test, original_speed), module_ignore_errors = True)
@@ -612,7 +785,7 @@ def _parse_buffer_profile_params(param, cmd, name):
- The size of new profile
"""
cli_str = "config buffer profile {} {}".format(cmd, name)
- xon = ""
+ xon = None
if 'xon' in param:
xon = param['xon']
cli_str += " --xon " + xon
@@ -623,18 +796,20 @@ def _parse_buffer_profile_params(param, cmd, name):
cli_str += " --xoff " + xoff
size = ""
- if 'size' in param:
+ if DEFAULT_SHARED_HEADROOM_POOL_ENABLED and xon:
+ new_size = int(xon)
+ elif 'size' in param:
size = param['size']
cli_str += " --size " + size
- new_headroom = int(size)
+ new_size = int(size)
elif xoff and xon:
- new_headroom = int(xon) + int(xoff)
+ new_size = int(xon) + int(xoff)
else:
- new_headroom = None
+ new_size = None
if 'dynamic_th' in param:
cli_str += " --dynamic_th " + param['dynamic_th']
- return cli_str, new_headroom
+ return cli_str, new_size, xoff
def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test):
@@ -668,8 +843,14 @@ def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, po
original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout']
original_cable_len = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout']
original_profile = duthost.shell('redis-cli hget "BUFFER_PG_TABLE:{}:3-4" profile'.format(port_to_test))['stdout'][1:-1]
- original_headroom_size = duthost.shell('redis-cli hget "{}" size'.format(original_profile))['stdout']
+ original_pg_size = duthost.shell('redis-cli hget "{}" size'.format(original_profile))['stdout']
original_pool_size = duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool size')['stdout']
+ if DEFAULT_OVER_SUBSCRIBE_RATIO:
+ original_shp_size = duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool xoff')['stdout']
+ original_pg_xoff = duthost.shell('redis-cli hget "{}" xoff'.format(original_profile))['stdout']
+ else:
+ original_shp_size = None
+ original_pg_xoff = None
initial_asic_db_profiles = fetch_initial_asic_db(duthost)
@@ -679,7 +860,7 @@ def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, po
if not param:
pytest.skip('Headroom override test skipped due to no parameters for "add" command provided')
else:
- cli_str, new_headroom = _parse_buffer_profile_params(param, "add", "headroom-override")
+ cli_str, new_size, new_xoff = _parse_buffer_profile_params(param, "add", "headroom-override")
logging.info("[Prepare configuration] {}".format(cli_str))
duthost.shell(cli_str)
@@ -694,8 +875,11 @@ def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, po
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = new_headroom)
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ new_xoff = new_xoff,
+ old_size = original_pg_size,
+ new_size = new_size)
# Add another headroom override
logging.info("[Test: headroom override on more lossless PGs 6] Apply the profile on the PG and check pool size")
@@ -708,32 +892,39 @@ def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, po
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = new_headroom,
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ new_xoff = new_xoff,
+ old_size = original_pg_size,
+ new_size = new_size,
new_pg_number = 3)
param = TESTPARAM_HEADROOM_OVERRIDE.get("set")
if not param:
pytest.skip('Headroom override test skipped due to no parameters for "set" command provided')
else:
- cli_str, new_headroom = _parse_buffer_profile_params(param, "set", "headroom-override")
+ cli_str, new_size, new_xoff = _parse_buffer_profile_params(param, "set", "headroom-override")
+
logging.info("[Test: update headroom-override profile] Update the profile and check pool size: {}".format(cli_str))
duthost.shell(cli_str)
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
- new_headroom = new_headroom,
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ new_xoff = new_xoff,
+ old_size = original_pg_size,
+ new_size = new_size,
new_pg_number = 3)
- # Recover configuration
+ # Restore configuration
logging.info("[Test: static headroom being referenced can not be removed]")
duthost.shell('config buffer profile remove headroom-override', module_ignore_errors = True)
profile = duthost.shell('redis-cli hgetall "BUFFER_PROFILE_TABLE:headroom-override"')['stdout']
pytest_assert(profile, 'Headroom override profile has been removed when being referenced')
- logging.info("[Recover configuration]")
+ logging.info("[Restore configuration]")
duthost.shell('config interface buffer priority-group lossless remove {}'.format(port_to_test))
duthost.shell('config interface buffer priority-group lossless add {} 3-4'.format(port_to_test))
@@ -742,13 +933,167 @@ def test_headroom_override(duthosts, rand_one_dut_hostname, conn_graph_facts, po
check_pool_size(duthost,
pool_oid,
pool_size = original_pool_size,
- old_headroom = original_headroom_size,
+ shp_size = original_shp_size,
+ old_xoff = original_pg_xoff,
+ old_size = original_pg_size,
new_pg_number = 2)
finally:
duthost.shell('config interface buffer priority-group lossless remove {}'.format(port_to_test), module_ignore_errors = True)
duthost.shell('config interface buffer priority-group lossless add {} 3-4'.format(port_to_test), module_ignore_errors = True)
duthost.shell('config buffer profile remove headroom-override', module_ignore_errors = True)
+def check_buffer_profiles_for_shp(duthost, shp_enabled=True):
+ def _check_buffer_profiles_for_shp(duthost, shp_enabled):
+ buffer_profiles = duthost.shell('redis-cli keys "BUFFER_PROFILE_TABLE:*"')['stdout'].split('\n')
+ for profile_name in buffer_profiles:
+ m = re.search(LOSSLESS_PROFILE_PATTERN, profile_name)
+ if m:
+ profile_obj = _compose_dict_from_cli(duthost.shell('redis-cli hgetall {}'.format(profile_name))['stdout'].split('\n'))
+ if shp_enabled:
+ return profile_obj['xon'] == profile_obj['size']
+ else:
+ return int(profile_obj['size']) == int(profile_obj['xon']) + int(profile_obj['xoff'])
+
+ pytest_assert(wait_until(20, 2, _check_buffer_profiles_for_shp, duthost, shp_enabled))
+
+
+def test_shared_headroom_pool_configure(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test):
+ """Test case for shared headroom pool configuration
+
+    Test case to verify the various commands of shared headroom pool configuration and how they affect the other buffer configurations
+
+ The flow of the test case:
+ 1. If the over subscribe ratio isn't 2, configure it to 2
+ If the size of shared headroom pool is configured: remove it
+ 2. Get shared headroom pool size, check it against the ASIC DB
+ Check the buffer profiles,
+ - For Mellanox platform, for all the buffer profiles, size should be equal to xon
+ 3. Testcase: over subscribe ratio updated
+ - Config over subscribe ratio to 4, check whether the shared headroom pool size is divided by 2
+ 4. Testcase: configure size
+ - Config shared headroom pool size to a certain number which is predefined on a per-vendor basis,
+ Check whether the shared headroom pool size is equal to the configured number
+ 5. Testcase: remove the over subscribe ratio configuration while size is configured
+ - Check the buffer profiles and shared headroom pool size
+ 6. Testcase: remove the shared headroom pool size with over subscribe ratio configured
+ - Config over subscribe ratio to 2, check whether the shared headroom pool size matches the previous value
+ - Remove the size configuration, check whether shared headroom pool is still enabled
+ 7. Testcase: remove both over subscribe ratio and shared headroom pool size
+ 8. Restore configuration
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ original_over_subscribe_ratio = duthost.shell('redis-cli -n 4 hget "DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE" over_subscribe_ratio')['stdout']
+ original_configured_shp_size = duthost.shell('redis-cli -n 4 hget "BUFFER_POOL|ingress_lossless_pool" xoff')['stdout']
+ original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout']
+ original_cable_len = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout']
+
+ if not TESTPARAM_SHARED_HEADROOM_POOL:
+ pytest.skip('Shared headroom pool test skipped due to no parameters defined')
+ shp_size_to_test = TESTPARAM_SHARED_HEADROOM_POOL.get("size")
+ if not shp_size_to_test:
+ pytest.skip('Shared headroom pool test skipped due to size not defined')
+
+ try:
+ # First, we need to fetch the SAI OID of ingress lossless pool.
+ # The only way to achieve that is to trigger a new buffer profile creation and then fetch the SAI OID from it
+ initial_asic_db_profiles = fetch_initial_asic_db(duthost)
+ duthost.shell('config interface cable-length {} 10m'.format(port_to_test))
+ expected_profile = 'pg_lossless_{}_10m_profile'.format(original_speed)
+ time.sleep(20)
+ profile_oid, pool_oid = check_buffer_profile_details(duthost, initial_asic_db_profiles, expected_profile, None, None)
+ logging.info('Got SAI OID of ingress lossless pool: {}'.format(pool_oid))
+ # Restore the cable length
+ duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len))
+ time.sleep(20)
+
+ if original_over_subscribe_ratio != '2':
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 2')
+ if original_configured_shp_size and original_configured_shp_size != '0':
+ duthost.shell('config buffer shared-headroom-pool size 0')
+
+ # Make sure the shp configuration has been deployed
+ time.sleep(30)
+
+ # Check whether the buffer profile for lossless PGs are correct
+ check_buffer_profiles_for_shp(duthost)
+
+ # Fetch initial buffer pool size and shared headroom pool size
+ original_pool_size = duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool size')['stdout']
+ original_shp_size = duthost.shell('redis-cli hget BUFFER_POOL_TABLE:ingress_lossless_pool xoff')['stdout']
+
+ logging.info('[Test: check shared headroom pool size consistency between APPL_DB and ASIC_DB]')
+ check_pool_size(duthost,
+ pool_oid,
+ pool_size = original_pool_size,
+ shp_size = original_shp_size,
+ old_pg_number = 0,
+ new_pg_number = 0)
+
+ logging.info('[Test: update over-subscribe-ratio to 4 and check sizes of buffer pool and shared headroom pool]')
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 4')
+ check_pool_size(duthost,
+ pool_oid,
+ pool_size = original_pool_size,
+ shp_size = original_shp_size,
+ old_ratio = '2',
+ new_ratio = '4',
+ old_pg_number = 0,
+ new_pg_number = 0)
+
+ logging.info('[Test: configure shared headroom pool size and check APPL_DB and ASIC_DB]')
+ duthost.shell('config buffer shared-headroom-pool size {}'.format(shp_size_to_test))
+ check_pool_size(duthost,
+ pool_oid,
+ config_shp_size = shp_size_to_test)
+ check_buffer_profiles_for_shp(duthost)
+
+ logging.info('[Test: remove the over subscribe ratio configuration while size is configured]')
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 0')
+ check_pool_size(duthost,
+ pool_oid,
+ config_shp_size = shp_size_to_test)
+ check_buffer_profiles_for_shp(duthost)
+
+ logging.info('[Test: remove the size configuration while over subscribe ratio is configured]')
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 1')
+ duthost.shell('config buffer shared-headroom-pool size 0')
+ check_pool_size(duthost,
+ pool_oid,
+ pool_size = original_pool_size,
+ shp_size = original_shp_size,
+ old_ratio = '2',
+ new_ratio = '1',
+ old_pg_number = 0,
+ new_pg_number = 0)
+ check_buffer_profiles_for_shp(duthost)
+
+ logging.info('[Test: remove over subscribe ratio]')
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 0')
+ check_pool_size(duthost,
+ pool_oid,
+ pool_size = original_pool_size,
+ shp_size = original_shp_size,
+ old_ratio = '2',
+ new_ratio = '0',
+ old_pg_number = 0,
+ new_pg_number = 0)
+
+ logging.info('[Test: remove over subscribe ratio and then the size]')
+ # Configure over subscribe ratio and shared headroom pool size
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 2')
+ duthost.shell('config buffer shared-headroom-pool size {}'.format(shp_size_to_test))
+ check_pool_size(duthost,
+ pool_oid,
+ config_shp_size = shp_size_to_test)
+ # Remove the over subscribe ratio and then the size
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 0')
+ duthost.shell('config buffer shared-headroom-pool size 0')
+ check_buffer_profiles_for_shp(duthost, shp_enabled = False)
+ finally:
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio {}'.format(original_over_subscribe_ratio), module_ignore_errors = True)
+ duthost.shell('config buffer shared-headroom-pool size {}'.format(original_configured_shp_size), module_ignore_errors = True)
+ duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len), module_ignore_errors = True)
+
def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test, pg_to_test):
"""Test case for non default dynamic th
@@ -767,7 +1112,7 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
Verify whether the profile created in step 4 is removed
6. Reconfigure it as non default dynamic th profile and check related info
7. Update it to a headroom override profile and check related info
- 8. Recover the configuration
+ 8. Restore the configuration
Args:
port_to_test: On which port will the test be performed
@@ -793,7 +1138,7 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
if not param:
pytest.skip('Lossless pg test skipped due to no parameters for "headroom-override" command provided')
else:
- cli_str, new_headroom = _parse_buffer_profile_params(param, "add", "headroom-override")
+ cli_str, new_size, new_xoff = _parse_buffer_profile_params(param, "add", "headroom-override")
# Create profiles
logging.info('[Preparing]: Create static buffer profile for headroom override')
@@ -808,7 +1153,7 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
if not param:
pytest.skip('Lossless pg test skipped due to no parameters for "non-default-dynamic_th" command provided')
else:
- cli_str, new_headroom = _parse_buffer_profile_params(param, "add", "non-default-dynamic_th")
+ cli_str, new_size, new_xoff = _parse_buffer_profile_params(param, "add", "non-default-dynamic_th")
logging.info('[Preparing]: Create static buffer profile for non default dynamic_th')
duthost.shell(cli_str)
@@ -827,6 +1172,8 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
check_pg_profile(duthost, buffer_pg, 'headroom-override')
if pg_to_test == '3-4':
check_lossless_profile_removed(duthost, expected_profile, profile_oid)
+ else:
+ initial_asic_db_profiles = fetch_initial_asic_db(duthost)
# Update it to non-default dynamic_th
logging.info('[Testcase: headroom override => dynamically calculated headroom with non-default dynamic_th]')
@@ -858,7 +1205,7 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
check_pg_profile(duthost, buffer_pg, 'headroom-override')
check_lossless_profile_removed(duthost, expected_nondef_profile, profile_oid)
- # Update it to dynamic PG, recover
+ # Update it to dynamic PG, restore the configuration
logging.info('[Testcase: headroom override => dynamic headroom]')
duthost.shell(set_command)
check_pg_profile(duthost, buffer_pg, expected_profile)
@@ -868,7 +1215,16 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
duthost.shell('config buffer profile remove headroom-override')
duthost.shell('config buffer profile remove non-default-dynamic_th')
check_lossless_profile_removed(duthost, 'headroom-override', headroom_override_profile_oid)
- check_lossless_profile_removed(duthost, 'non-default-dynamic_th')
+        # No need to check non-default-dynamic_th because it won't be propagated to APPL_DB
+
+ # Restore the cable length
+ duthost.shell(set_command)
+
+ duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len))
+ old_profile = expected_profile
+ expected_profile = 'pg_lossless_{}_{}_profile'.format(original_speed, original_cable_len)
+ check_pg_profile(duthost, buffer_pg, expected_profile)
+ check_lossless_profile_removed(duthost, old_profile, profile_oid)
finally:
if pg_to_test == '3-4':
duthost.shell(set_command, module_ignore_errors = True)
@@ -879,17 +1235,27 @@ def test_lossless_pg(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_
duthost.shell('config buffer profile remove non-default-dynamic_th', module_ignore_errors = True)
+@pytest.mark.disable_loganalyzer
def test_exceeding_headroom(duthosts, rand_one_dut_hostname, conn_graph_facts, port_to_test):
"""The test case for maximum headroom
If the accumulative headroom of a port exceeds the maximum value,
the new configuation causing the violation should not be applied to prevent orchagent from exiting
- The idea is to configure a super long cable which can cause a super large headroom thus exceeding the maximum value.
- Afterthat, verify the profile of the PG isn't changed
-
Args:
port_to_test: Port to run the test
+
+ The flow of the test case:
+ 1. Find the longest possible cable length the port can support.
+ It will also verify whether a super long cable will be applied
+       The test will be skipped if such a limit isn't found after the cable length has been increased to 2km.
+ 2. Add extra PGs to a port, which causes the accumulative headroom exceed the limit
+ 3. Configure a headroom-override on a port and then enlarge the size of the profile.
+ Verify whether the large size is applied.
+ 4. Configure a long cable length with shared headroom pool enabled.
+ Verify the size in the profile is updated when shared headroom pool is disabled.
+
+ In each step, it also checks whether the expected error message is found.
"""
duthost = duthosts[rand_one_dut_hostname]
max_headroom_size = duthost.shell('redis-cli -n 6 hget "BUFFER_MAX_PARAM_TABLE|{}" max_headroom_size'.format(port_to_test))['stdout']
@@ -898,59 +1264,153 @@ def test_exceeding_headroom(duthosts, rand_one_dut_hostname, conn_graph_facts, p
original_cable_len = duthost.shell('redis-cli -n 4 hget "CABLE_LENGTH|AZURE" {}'.format(port_to_test))['stdout']
original_speed = duthost.shell('redis-cli -n 4 hget "PORT|{}" speed'.format(port_to_test))['stdout']
- original_profile = 'pg_lossless_{}_{}_profile'.format(original_speed, original_cable_len)
+ original_over_subscribe_ratio = duthost.shell('redis-cli -n 4 hget "DEFAULT_LOSSLESS_BUFFER_PARAMETER|AZURE" over_subscribe_ratio')['stdout']
+ original_configured_shp_size = duthost.shell('redis-cli -n 4 hget "BUFFER_POOL|ingress_lossless_pool" xoff')['stdout']
try:
- # Set to super long cable length
- logging.info('[Config a super long cable length]')
- duthost.shell('config interface cable-length {} 10000m'.format(port_to_test))
-
- logging.info('Verify the profile isn\'t changed')
- check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), original_profile)
- duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len))
-
- # add additional PG
- logging.info('[Config the cable length on the port]')
- duthost.shell('config interface cable-length {} 300m'.format(port_to_test))
-
- logging.info('Verify the profile has been changed')
- expected_profile = 'pg_lossless_{}_{}_profile'.format(original_speed, '300m')
- check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile)
+ # Test case runs with shared headroom pool disabled
+ # because the headroom size is very small with shared headroom pool enabled
+ if original_over_subscribe_ratio and original_over_subscribe_ratio != '0':
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 0')
+ if original_configured_shp_size and original_configured_shp_size != '0':
+ duthost.shell('config buffer shared-headroom-pool size 0')
+
+ # 1. Find the longest possible cable length the port can support.
+ loganalyzer, marker = init_log_analyzer(duthost,
+ 'Fetch the longest possible cable length',
+ ['Update speed .* and cable length .* for port .* failed, accumulative headroom size exceeds the limit',
+ 'Unable to update profile for port .*. Accumulative headroom size exceeds limit',
+ 'Failed to process table update',
+ 'oid is set to null object id on SAI_OBJECT_TYPE_BUFFER_PROFILE',
+ 'Failed to remove buffer profile .* with type BUFFER_PROFILE_TABLE',
+ 'doTask: Failed to process buffer task, drop it'])
+ logging.info('[Find out the longest cable length the port can support]')
+ cable_length = 300
+ while True:
+ duthost.shell('config interface cable-length {} {}m'.format(port_to_test, cable_length))
+ expected_profile = 'pg_lossless_{}_{}m_profile'.format(original_speed, cable_length)
+ profile_applied = check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile, False)
+ if not profile_applied:
+ break
+ logging.debug('Cable length {} has been applied successfully'.format(cable_length))
+ cable_length += 100
+ if cable_length > 2000:
+ pytest.skip("Not able to find the maximum headroom of port {} after cable length has been increased to 2km, skip the test".format(port_to_test))
+
+ # We've got the maximum cable length that can be applied on the port
+ violating_cable_length = cable_length
+ maximum_cable_length = cable_length - 100
+ logging.info('Got maximum cable length {}'.format(maximum_cable_length))
+
+ # Check whether there is the expected error message in the log
+ logging.info('Check whether the expected error message is found')
+ check_log_analyzer(loganalyzer, marker)
+
+ loganalyzer, marker = init_log_analyzer(duthost,
+ 'Add addtional PGs',
+ ['Update speed .* and cable length .* for port .* failed, accumulative headroom size exceeds the limit',
+ 'Unable to update profile for port .*. Accumulative headroom size exceeds limit'])
+
+ maximum_profile_name = 'pg_lossless_{}_{}m_profile'.format(original_speed, maximum_cable_length)
+ maximum_profile = _compose_dict_from_cli(duthost.shell('redis-cli hgetall BUFFER_PROFILE_TABLE:{}'.format(maximum_profile_name))['stdout'].split())
+
+ # Config the cable length to the longest acceptable value and check the profile
+ logging.info('[Config the cable length to the longest acceptable value on the port]')
+ duthost.shell('config interface cable-length {} {}m'.format(port_to_test, maximum_cable_length))
+ check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), maximum_profile_name)
+
+ # 2. Add extra PGs to a port, which causes the accumulative headroom exceed the limit
logging.info('Add another PG and make sure the system isn\'t broken')
duthost.shell('config interface buffer priority-group lossless add {} {}'.format(port_to_test, '5-7'))
+ profile_applied = check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:5-7'.format(port_to_test), maximum_profile_name, False)
+ pytest_assert(not profile_applied, "Profile {} applied on {}:5-7, which makes the accumulative headroom exceed the limit".format(maximum_profile_name, port_to_test))
- # We can't say whether this will accumulative headroom exceed the limit, but the system should not crash
- # Leverage sanity check to verify that
+ # Check whether there is the expected error message in the log
+ check_log_analyzer(loganalyzer, marker)
+
+ # Restore the configuration
duthost.shell('config interface buffer priority-group lossless remove {} {}'.format(port_to_test, '5-7'))
duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len))
- # Static profile
+ # 3. Configure a headroom-override on a port and then enlarge the size of the profile.
+ loganalyzer, marker = init_log_analyzer(duthost,
+ 'Static profile',
+ ['Update speed .* and cable length .* for port .* failed, accumulative headroom size exceeds the limit',
+ 'Unable to update profile for port .*. Accumulative headroom size exceeds limit'])
+
logging.info('[Config headroom override to PG 3-4]')
- duthost.shell('config buffer profile add test-headroom --xon 18432 --xoff 50000 -headroom 68432')
+ duthost.shell('config buffer profile add test-headroom --xon {} --xoff {} --size {}'.format(
+ maximum_profile['xon'], maximum_profile['xoff'], maximum_profile['size']))
duthost.shell('config interface buffer priority-group lossless set {} {} {}'.format(port_to_test, '3-4', 'test-headroom'))
logging.info('Verify the profile is applied')
check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), 'test-headroom')
+
+ # Apply the profile on other PGs, which make the accumulative headroom exceed the limit
duthost.shell('config interface buffer priority-group lossless add {} {} {}'.format(port_to_test, '5-7', 'test-headroom'))
+ # Make sure the profile hasn't been applied
+ profile_applied = check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:5-7'.format(port_to_test), 'test-headroom', False)
+ pytest_assert(not profile_applied, "Profile {} applied on {}:5-7, which makes the accumulative headroom exceed the limit".format(maximum_profile_name, port_to_test))
+
+ # Check log
+ check_log_analyzer(loganalyzer, marker)
- # Again, we can't say for sure whether the accumulative headroom exceeding.
- # Just make sure the system doesn't crash
+ # Restore configuration
duthost.shell('config interface buffer priority-group lossless remove {} {}'.format(port_to_test, '5-7'))
+ # Update static profile to a larger size, which makes it exceeds the port headroom limit
+ # Setup the log analyzer
+ loganalyzer, marker = init_log_analyzer(duthost,
+ 'Configure a larger size to a static profile',
+ ['BUFFER_PROFILE .* cannot be updated because .* referencing it violates the resource limitation',
+ 'Unable to update profile for port .*. Accumulative headroom size exceeds limit'])
+
logging.info('[Update headroom override to a larger size]')
- duthost.shell('config buffer profile set test-headroom --xon 18432 --xoff 860160 -headroom 878592')
+ duthost.shell('config buffer profile set test-headroom --size {}'.format(int(maximum_profile['size']) * 2))
# This should make it exceed the limit, so the profile should not applied to the APPL_DB
+ time.sleep(20)
size_in_appldb = duthost.shell('redis-cli hget "BUFFER_PROFILE_TABLE:test-headroom" size')['stdout']
- pytest_assert(size_in_appldb == '68432', 'The profile with a large size was applied to APPL_DB, which can make headroom exceeding')
+ pytest_assert(size_in_appldb == maximum_profile['size'], 'The profile with a large size was applied to APPL_DB, which can make headroom exceeding')
+
+ # Check log
+ check_log_analyzer(loganalyzer, marker)
+
+ # Restore config
duthost.shell('config interface buffer priority-group lossless set {} {}'.format(port_to_test, '3-4'))
duthost.shell('config buffer profile remove test-headroom')
- logging.info('[Clean up]')
+
+ # 4. Configure a long cable length with shared headroom pool enabled.
+ loganalyzer, marker = init_log_analyzer(duthost,
+ 'Toggle shared headroom pool',
+ ['BUFFER_PROFILE .* cannot be updated because .* referencing it violates the resource limitation',
+ 'Unable to update profile for port .*. Accumulative headroom size exceeds limit',
+ 'refreshSharedHeadroomPool: Failed to update buffer profile .* when toggle shared headroom pool'])
+
+ # Enable shared headroom pool
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 2')
+ time.sleep(20)
+ # And then configure the cable length which causes the accumulative headroom exceed the limit
+ duthost.shell('config interface cable-length {} {}m'.format(port_to_test, violating_cable_length))
+ expected_profile = 'pg_lossless_{}_{}m_profile'.format(original_speed, violating_cable_length)
+ check_pg_profile(duthost, 'BUFFER_PG_TABLE:{}:3-4'.format(port_to_test), expected_profile)
+
+ # Disable shared headroom pool
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio 0')
+ time.sleep(20)
+ # Make sure the size isn't updated
+ profile_appldb = _compose_dict_from_cli(duthost.shell('redis-cli hgetall BUFFER_PROFILE_TABLE:{}'.format(expected_profile))['stdout'].split('\n'))
+ assert profile_appldb['xon'] == profile_appldb['size']
+
+ # Check log
+ check_log_analyzer(loganalyzer, marker)
finally:
+ logging.info('[Clean up]')
duthost.shell('config interface cable-length {} {}'.format(port_to_test, original_cable_len), module_ignore_errors = True)
duthost.shell('config interface buffer priority-group lossless remove {} 5-7'.format(port_to_test), module_ignore_errors = True)
duthost.shell('config interface buffer priority-group lossless set {} 3-4'.format(port_to_test), module_ignore_errors = True)
duthost.shell('config buffer profile remove test-headroom', module_ignore_errors = True)
+ duthost.shell('config buffer shared-headroom-pool over-subscribe-ratio {}'.format(original_over_subscribe_ratio), module_ignore_errors = True)
def _recovery_to_dynamic_buffer_model(duthost):
diff --git a/tests/qos/test_pfc_pause.py b/tests/qos/test_pfc_pause.py
index 2a607024172..6f2ce4d3a2f 100644
--- a/tests/qos/test_pfc_pause.py
+++ b/tests/qos/test_pfc_pause.py
@@ -1,21 +1,23 @@
-
-import pytest
+import logging
import os
-import time
-import re
-import struct
+import pytest
import random
-from tests.common.fixtures.conn_graph_facts import conn_graph_facts
-from qos_fixtures import lossless_prio_dscp_map, leaf_fanouts
-from qos_helpers import ansible_stdout_to_str, eos_to_linux_intf, start_pause, stop_pause, setup_testbed, gen_testbed_t0, PFC_GEN_FILE, PFC_GEN_REMOTE_PATH
+import time
+from qos_fixtures import lossless_prio_dscp_map
+from qos_helpers import ansible_stdout_to_str, get_phy_intfs, get_addrs_in_subnet, get_active_vlan_members, get_vlan_subnet, natural_keys
+from tests.common.fixtures.conn_graph_facts import conn_graph_facts, fanout_graph_facts
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
from tests.common.fixtures.ptfhost_utils import change_mac_addresses # lgtm[py/unused-import]
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.pfc_storm import PFCStorm
pytestmark = [
pytest.mark.topology('t0')
]
+logger = logging.getLogger(__name__)
+
PFC_PKT_COUNT = 1000000000
PTF_FILE_REMOTE_PATH = '~/ptftests/pfc_pause_test.py'
@@ -26,45 +28,91 @@
""" Maximum number of interfaces to test on a DUT """
MAX_TEST_INTFS_COUNT = 4
-def run_test_t0(fanouthosts,
- duthost,
- ptfhost,
- conn_graph_facts,
- leaf_fanouts,
- dscp,
- dscp_bg,
- queue_paused,
- send_pause,
- pfc_pause,
- pause_prio,
- pause_time=65535,
- max_test_intfs_count=128):
+@pytest.fixture(scope="module", autouse=True)
+def pfc_test_setup(duthosts, rand_one_dut_hostname, tbinfo):
"""
- @Summary: Run a series of tests on a T0 topology.
- For the T0 topology, we only test Vlan (server-faced) interfaces.
- @param conn_graph_facts: Testbed topology
- @param leaf_fanouts: Leaf fanout switches
- @param dscp: DSCP value of test data packets
- @param dscp_bg: DSCP value of background data packets
- @param queue_paused: if the queue is expected to be paused
- @param send_pause: send pause frames or not
- @param pfc_pause: send PFC pause frames or not
- @param pause_prio: priority of PFC franme
- @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default.
- @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces.
- return: Return # of iterations and # of passed iterations for each tested interface.
+ Generate configurations for the tests
+
+ Args:
+ duthosts(AnsibleHost) : multi dut instance
+ rand_one_dut_hostname(string) : one of the dut instances from the multi dut
+
+ Yields:
+ setup(dict): DUT interfaces, PTF interfaces, PTF IP addresses, and PTF MAC addresses
"""
- """ Clear DUT's PFC counters """
- duthost.sonic_pfc_counters(method="clear")
+ """ Get all the active physical interfaces enslaved to the Vlan """
+ """ These interfaces are actually server-faced interfaces at T0 """
+ duthost = duthosts[rand_one_dut_hostname]
+ vlan_members = get_active_vlan_members(duthost)
+
+ """ Get Vlan subnet """
+ vlan_subnet = get_vlan_subnet(duthost)
+
+ """ Generate IP addresses for servers in the Vlan """
+ vlan_ip_addrs = get_addrs_in_subnet(vlan_subnet, len(vlan_members))
+
+ """ Generate MAC addresses 00:00:00:00:00:XX for servers in the Vlan """
+ vlan_mac_addrs = [5 * '00:' + format(k, '02x') for k in random.sample(range(1, 256), len(vlan_members))]
+
+ """ Find correspoinding interfaces on PTF """
+ phy_intfs = get_phy_intfs(duthost)
+ phy_intfs.sort(key=natural_keys)
+ vlan_members.sort(key=natural_keys)
+ vlan_members_index = [phy_intfs.index(intf) for intf in vlan_members]
+ ptf_intfs = ['eth' + str(i) for i in vlan_members_index]
""" Disable DUT's PFC wd """
duthost.shell('sudo pfcwd stop')
- """ Generate a T0 testbed configuration """
- dut_intfs, ptf_intfs, ptf_ip_addrs, ptf_mac_addrs = gen_testbed_t0(duthost)
+ yield {
+ 'vlan_members': vlan_members,
+ 'ptf_intfs': ptf_intfs,
+ 'vlan_ip_addrs': vlan_ip_addrs,
+ 'vlan_mac_addrs': vlan_mac_addrs
+ }
+
+ """ Enable DUT's PFC wd """
+ if 'dualtor' not in tbinfo['topo']['name']:
+ duthost.shell('sudo pfcwd start_default')
+
+def run_test(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts,
+ fanout_info, traffic_params, pause_prio=None, queue_paused=True,
+ send_pause=True, pfc_pause=True, max_test_intfs_count=128):
+ """
+ Run the test
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_info(fixture) : fanout graph info
+ traffic_params(dict) : dict containing the dscp of test dscp and background dscp
+ pause_prio(string) : priority of PFC franme
+ queue_paused(bool) : if the queue is expected to be paused
+ send_pause(bool) : send pause frames or not
+ pfc_pause(bool) : send PFC pause frames or not
+ max_test_intfs_count(int) : maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces
+
+ Return:
+ Number of iterations and number of passed iterations for each tested interface.
+ """
+
+ setup = pfc_test_setup
+ dut_intfs = setup['vlan_members']
+ ptf_intfs = setup['ptf_intfs']
+ ptf_ip_addrs = setup['vlan_ip_addrs']
+ ptf_mac_addrs = setup['vlan_mac_addrs']
+ """ Clear DUT's PFC counters """
+ duthost.sonic_pfc_counters(method="clear")
+
+
results = dict()
+ all_peer_dev = set()
+ storm_handle = None
for i in range(min(max_test_intfs_count, len(ptf_intfs))):
src_index = i
dst_index = (i + 1) % len(ptf_intfs)
@@ -88,31 +136,37 @@ def run_test_t0(fanouthosts,
if send_pause:
peer_device = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerdevice']
peer_port = conn_graph_facts['device_conn'][duthost.hostname][dut_intf_paused]['peerport']
- peer_port_name = eos_to_linux_intf(peer_port)
- peerdev_ans = fanouthosts[peer_device]
+ peer_info = { 'peerdevice': peer_device,
+ 'pfc_fanout_interface': peer_port
+ }
if not pfc_pause:
pause_prio = None
- start_pause(host_ans=peerdev_ans,
- pkt_gen_path=PFC_GEN_REMOTE_PATH,
- intf=peer_port_name,
- pkt_count=PFC_PKT_COUNT,
- pause_duration=pause_time,
- pause_priority=pause_prio)
-
+ if not storm_handle:
+ storm_handle = PFCStorm(duthost, fanout_info, fanouthosts,
+ pfc_queue_idx=pause_prio,
+ pfc_frames_number=PFC_PKT_COUNT,
+ peer_info=peer_info)
+
+ storm_handle.update_peer_info(peer_info)
+ if not all_peer_dev or peer_device not in all_peer_dev:
+ storm_handle.deploy_pfc_gen()
+ all_peer_dev.add(peer_device)
+ storm_handle.start_storm()
""" Wait for PFC pause frame generation """
time.sleep(1)
""" Run PTF test """
+ logger.info("Running test: src intf: {} dest intf: {}".format(dut_intfs[src_index], dut_intfs[dst_index]))
intf_info = '--interface %d@%s --interface %d@%s' % (src_index, src_intf, dst_index, dst_intf)
test_params = ("mac_src=\'%s\';" % src_mac
+ "mac_dst=\'%s\';" % dst_mac
+ "ip_src=\'%s\';" % src_ip
+ "ip_dst=\'%s\';" % dst_ip
- + "dscp=%d;" % dscp
- + "dscp_bg=%d;" % dscp_bg
+ + "dscp=%d;" % traffic_params['dscp']
+ + "dscp_bg=%d;" % traffic_params['dscp_bg']
+ "pkt_count=%d;" % PTF_PKT_COUNT
+ "pkt_intvl=%f;" % PTF_PKT_INTVL_SEC
+ "port_src=%d;" % src_index
@@ -139,197 +193,151 @@ def run_test_t0(fanouthosts,
if send_pause:
""" Stop PFC / FC storm """
- stop_pause(peerdev_ans, PFC_GEN_FILE)
+ storm_handle.stop_storm()
time.sleep(1)
return results
+def test_pfc_pause_lossless(pfc_test_setup, fanouthosts, duthost, ptfhost,
+ conn_graph_facts, fanout_graph_facts,
+ lossless_prio_dscp_map, enum_dut_lossless_prio):
-def run_test(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- dscp,
- dscp_bg,
- queue_paused,
- send_pause,
- pfc_pause,
- pause_prio,
- pause_time=65535,
- max_test_intfs_count=128):
"""
- @Summary: Run a series of tests (only support T0 topology)
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param leaf_fanouts: Leaf fanout switches
- @param dscp: DSCP value of test data packets
- @param dscp_bg: DSCP value of background data packets
- @param queue_paused: if the queue is expected to be paused
- @param send_pause: send pause frames or not
- @param pfc_pause: send PFC pause frames or not
- @param pause_prio: priority of PFC franme
- @param pause_time: pause time quanta. It is 65535 (maximum pause time quanta) by default.
- @param max_test_intfs_count: maximum count of interfaces to test. By default, it is a very large value to cover all the interfaces.
- return: Return # of iterations and # of passed iterations for each tested interface.
+ Test if PFC pause frames can pause a lossless priority without affecting the other priorities
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_graph_facts(fixture) : fanout graph info
+ lossless_prio_dscp_map(dict) : lossless priorities and their DSCP values
+ enum_dut_lossless_prio (str): name of lossless priority to test
"""
- print tbinfo
- if tbinfo['topo']['name'].startswith('t0'):
- return run_test_t0(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- conn_graph_facts=conn_graph_facts, leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=queue_paused,
- send_pause=send_pause,
- pfc_pause=pfc_pause,
- pause_prio=pause_prio,
- pause_time=pause_time,
- max_test_intfs_count=max_test_intfs_count)
-
- else:
- return None
-
-def test_pfc_pause_lossless(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- lossless_prio_dscp_map):
-
- """
- @Summary: Test if PFC pause frames can pause a lossless priority without affecting the other priorities
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param lossless_prio_dscp_map: lossless priorities and their DSCP values
- """
- setup_testbed(fanouthosts=fanouthosts,
- ptfhost=ptfhost,
- leaf_fanouts=leaf_fanouts)
-
+ test_errors = ""
errors = []
+ prio = int(enum_dut_lossless_prio.split('|')[-1])
+ dscp = lossless_prio_dscp_map[prio]
+ other_lossless_prio = 4 if prio == 3 else 3
+
- """ DSCP vlaues for lossless priorities """
- lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]]
+ """ DSCP values for other lossless priority """
+ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio]
""" DSCP values for lossy priorities """
- lossy_dscps = list(set(range(64)) - set(lossless_dscps))
-
- for prio in lossless_prio_dscp_map:
- """ DSCP values of the other lossless priorities """
- other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio]))
- """ We also need to test some DSCP values for lossy priorities """
- other_dscps = other_lossless_dscps + lossy_dscps[0:2]
-
- for dscp in lossless_prio_dscp_map[prio]:
- for dscp_bg in other_dscps:
- results = run_test(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- tbinfo=tbinfo,
- conn_graph_facts=conn_graph_facts,
- leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=True,
- send_pause=True,
- pfc_pause=True,
- pause_prio=prio,
- pause_time=65535,
- max_test_intfs_count=MAX_TEST_INTFS_COUNT)
-
- """ results should not be none """
- if results is None:
- assert 0
-
- errors = dict()
- for intf in results:
- if len(results[intf]) != 2:
- continue
-
- pass_count = results[intf][0]
- total_count = results[intf][1]
-
- if total_count == 0:
- continue
-
- if pass_count < total_count * PTF_PASS_RATIO_THRESH:
- errors[intf] = results[intf]
-
- if len(errors) > 0:
- print "errors occured:\n{}".format("\n".join(errors))
- assert 0
-
-def test_no_pfc(fanouthosts,
- duthost,
- ptfhost,
- tbinfo,
- conn_graph_facts,
- leaf_fanouts,
- lossless_prio_dscp_map):
+ lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp))
+
+ """ We also need to test some DSCP values for lossy priorities """
+ other_dscps = other_lossless_dscps + lossy_dscps[0:2]
+
+ for dscp_bg in other_dscps:
+ logger.info("Testing dscp: {} and background dscp: {}".format(dscp, dscp_bg))
+ traffic_params = {'dscp': dscp[0], 'dscp_bg': dscp_bg}
+ results = run_test(pfc_test_setup,
+ fanouthosts,
+ duthost,
+ ptfhost,
+ conn_graph_facts,
+ fanout_graph_facts,
+ traffic_params,
+ queue_paused=True,
+ send_pause=True,
+ pfc_pause=True,
+ pause_prio=prio,
+ max_test_intfs_count=MAX_TEST_INTFS_COUNT)
+
+ """ results should not be none """
+ if results is None:
+ test_errors += "Dscp: {}, Background Dscp: {}, Result is empty\n".format(dscp, dscp_bg)
+
+ errors = dict()
+ for intf in results:
+ if len(results[intf]) != 2:
+ continue
+
+ pass_count = results[intf][0]
+ total_count = results[intf][1]
+
+ if total_count == 0:
+ continue
+
+ if pass_count < total_count * PTF_PASS_RATIO_THRESH:
+ errors[intf] = results[intf]
+
+ if len(errors) > 0:
+ test_errors += "Dscp: {}, Background Dscp: {}, errors occured: {}\n"\
+ .format(dscp, dscp_bg, " ".join(["{}:{}".format(k,v) for k, v in errors.items()]))
+
+ pytest_assert(len(test_errors) == 0, test_errors)
+
+def test_no_pfc(pfc_test_setup, fanouthosts, duthost, ptfhost, conn_graph_facts,
+ fanout_graph_facts, lossless_prio_dscp_map, enum_dut_lossless_prio):
"""
- @Summary: Test if lossless and lossy priorities can forward packets in the absence of PFC pause frames
- @param fanouthosts: Fixture for fanout hosts
- @param tbinfo: Testbed information
- @param conn_graph_facts: Testbed topology
- @param lossless_prio_dscp_map: lossless priorities and their DSCP values
+ Test if lossless and lossy priorities can forward packets in the absence of PFC pause frames
+
+ Args:
+ pfc_test_setup(fixture) : setup fixture
+ fanouthosts(AnsibleHost) : fanout instance
+ duthost(AnsibleHost) : dut instance
+ ptfhost(AnsibleHost) : ptf instance
+ conn_graph_facts(fixture) : Testbed topology
+ fanout_graph_facts(fixture) : fanout graph info
+ lossless_prio_dscp_map(dict) : lossless priorities and their DSCP values
+ enum_dut_lossless_prio (str): name of lossless priority to test
"""
- setup_testbed(fanouthosts=fanouthosts,
- ptfhost=ptfhost,
- leaf_fanouts=leaf_fanouts)
+ test_errors = ""
errors = []
+ prio = int(enum_dut_lossless_prio.split('|')[-1])
+ dscp = lossless_prio_dscp_map[prio]
+ other_lossless_prio = 4 if prio == 3 else 3
- """ DSCP vlaues for lossless priorities """
- lossless_dscps = [int(dscp) for prio in lossless_prio_dscp_map for dscp in lossless_prio_dscp_map[prio]]
+ """ DSCP values for other lossless priority """
+ other_lossless_dscps = lossless_prio_dscp_map[other_lossless_prio]
""" DSCP values for lossy priorities """
- lossy_dscps = list(set(range(64)) - set(lossless_dscps))
-
- for prio in lossless_prio_dscp_map:
- """ DSCP values of the other lossless priorities """
- other_lossless_dscps = list(set(lossless_dscps) - set(lossless_prio_dscp_map[prio]))
- """ We also need to test some DSCP values for lossy priorities """
- other_dscps = other_lossless_dscps + lossy_dscps[0:2]
-
- for dscp in lossless_prio_dscp_map[prio]:
- for dscp_bg in other_dscps:
- results = run_test(fanouthosts=fanouthosts,
- duthost=duthost,
- ptfhost=ptfhost,
- tbinfo=tbinfo,
- conn_graph_facts=conn_graph_facts,
- leaf_fanouts=leaf_fanouts,
- dscp=dscp,
- dscp_bg=dscp_bg,
- queue_paused=False,
- send_pause=False,
- pfc_pause=None,
- pause_prio=None,
- pause_time=None,
- max_test_intfs_count=MAX_TEST_INTFS_COUNT)
-
- """ results should not be none """
- if results is None:
- assert 0
-
- errors = dict()
- for intf in results:
- if len(results[intf]) != 2:
- continue
-
- pass_count = results[intf][0]
- total_count = results[intf][1]
-
- if total_count == 0:
- continue
-
- if pass_count < total_count * PTF_PASS_RATIO_THRESH:
- errors[intf] = results[intf]
-
- if len(errors) > 0:
- print "errors occured:\n{}".format("\n".join(errors))
- assert 0
+ lossy_dscps = list(set(range(64)) - set(other_lossless_dscps) - set(dscp))
+
+ """ We also need to test some DSCP values for lossy priorities """
+ other_dscps = other_lossless_dscps + lossy_dscps[0:2]
+
+ for dscp_bg in other_dscps:
+ logger.info("Testing dscp: {} and background dscp: {}".format(dscp, dscp_bg))
+ traffic_params = {'dscp': dscp[0], 'dscp_bg': dscp_bg}
+ results = run_test(pfc_test_setup,
+ fanouthosts,
+ duthost,
+ ptfhost,
+ conn_graph_facts,
+ fanout_graph_facts,
+ traffic_params,
+ queue_paused=False,
+ send_pause=False,
+ pfc_pause=None,
+ pause_prio=None,
+ max_test_intfs_count=MAX_TEST_INTFS_COUNT)
+
+ """ results should not be none """
+ if results is None:
+ test_errors += "Dscp: {}, Background Dscp: {}, Result is empty\n".format(dscp, dscp_bg)
+
+ errors = dict()
+ for intf in results:
+ if len(results[intf]) != 2:
+ continue
+
+ pass_count = results[intf][0]
+ total_count = results[intf][1]
+
+ if total_count == 0:
+ continue
+
+ if pass_count < total_count * PTF_PASS_RATIO_THRESH:
+ errors[intf] = results[intf]
+
+ if len(errors) > 0:
+ test_errors += "Dscp: {}, Background Dscp: {}, errors occured: {}\n"\
+ .format(dscp, dscp_bg, " ".join(["{}:{}".format(k,v) for k, v in errors.items()]))
+
+ pytest_assert(len(test_errors) == 0, test_errors)
diff --git a/tests/qos/test_qos_sai.py b/tests/qos/test_qos_sai.py
index 0508b06fa4e..dd9a329cf7b 100644
--- a/tests/qos/test_qos_sai.py
+++ b/tests/qos/test_qos_sai.py
@@ -54,14 +54,19 @@ class TestQosSai(QosSaiBase):
'Arista-7260CX3-Q64'
]
- def testParameter(self, duthost, dutConfig, dutQosConfig, ingressLosslessProfile, ingressLossyProfile, egressLosslessProfile):
+ def testParameter(
+ self, duthost, dutConfig, dutQosConfig, ingressLosslessProfile,
+ ingressLossyProfile, egressLosslessProfile
+ ):
logger.info("asictype {}".format(duthost.facts["asic_type"]))
logger.info("config {}".format(dutConfig))
logger.info("qosConfig {}".format(dutQosConfig))
@pytest.mark.parametrize("xoffProfile", ["xoff_1", "xoff_2"])
- def testQosSaiPfcXoffLimit(self, xoffProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
- ingressLosslessProfile, egressLosslessProfile):
+ def testQosSaiPfcXoffLimit(
+ self, xoffProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ingressLosslessProfile, egressLosslessProfile
+ ):
"""
Test QoS SAI XOFF limits
@@ -101,11 +106,15 @@ def testQosSaiPfcXoffLimit(self, xoffProfile, ptfhost, dutTestParams, dutConfig,
})
if "pkts_num_margin" in qosConfig[xoffProfile].keys():
testParams["pkts_num_margin"] = qosConfig[xoffProfile]["pkts_num_margin"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.PFCtest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.PFCtest", testParams=testParams
+ )
@pytest.mark.parametrize("xonProfile", ["xon_1", "xon_2"])
- def testQosSaiPfcXonLimit(self, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
- ingressLosslessProfile):
+ def testQosSaiPfcXonLimit(
+ self, xonProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ingressLosslessProfile
+ ):
"""
Test QoS SAI XON limits
@@ -149,9 +158,14 @@ def testQosSaiPfcXonLimit(self, xonProfile, ptfhost, dutTestParams, dutConfig, d
testParams["pkts_num_hysteresis"] = qosConfig[xonProfile]["pkts_num_hysteresis"]
if "pkts_num_margin" in qosConfig[xonProfile].keys():
testParams["pkts_num_margin"] = qosConfig[xonProfile]["pkts_num_margin"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.PFCXonTest", testParams=testParams)
-
- def testQosSaiHeadroomPoolSize(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.PFCXonTest", testParams=testParams
+ )
+
+ def testQosSaiHeadroomPoolSize(
+ self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ingressLosslessProfile
+ ):
"""
Test QoS SAI Headroom pool size
@@ -210,9 +224,16 @@ def testQosSaiHeadroomPoolSize(self, ptfhost, dutTestParams, dutConfig, dutQosCo
if margin:
testParams["margin"] = margin
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest",
+ testParams=testParams
+ )
- def testQosSaiHeadroomPoolWatermark(self, duthosts, rand_one_dut_hostname, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, sharedHeadroomPoolSize, resetWatermark):
+ def testQosSaiHeadroomPoolWatermark(
+ self, duthosts, rand_one_dut_hostname, ptfhost, dutTestParams,
+ dutConfig, dutQosConfig, ingressLosslessProfile, sharedHeadroomPoolSize,
+ resetWatermark
+ ):
"""
Test QoS SAI Headroom pool watermark
@@ -263,10 +284,16 @@ def testQosSaiHeadroomPoolWatermark(self, duthosts, rand_one_dut_hostname, ptfh
"buf_pool_roid": ingressLosslessProfile["bufferPoolRoid"],
"max_headroom": sharedHeadroomPoolSize
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.HdrmPoolSizeTest",
+ testParams=testParams
+ )
@pytest.mark.parametrize("bufPool", ["wm_buf_pool_lossless", "wm_buf_pool_lossy"])
- def testQosSaiBufferPoolWatermark(self, request, bufPool, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLosslessProfile, egressLossyProfile, resetWatermark):
+ def testQosSaiBufferPoolWatermark(
+ self, request, bufPool, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ingressLosslessProfile, egressLossyProfile, resetWatermark,
+ ):
"""
Test QoS SAI Queue buffer pool watermark for lossless/lossy traffic
@@ -321,9 +348,15 @@ def testQosSaiBufferPoolWatermark(self, request, bufPool, ptfhost, dutTestParams
"cell_size": qosConfig[bufPool]["cell_size"],
"buf_pool_roid": buf_pool_roid
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.BufferPoolWatermarkTest", testParams=testParams)
-
- def testQosSaiLossyQueue(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, ingressLossyProfile):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.BufferPoolWatermarkTest",
+ testParams=testParams
+ )
+
+ def testQosSaiLossyQueue(
+ self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ingressLossyProfile
+ ):
"""
Test QoS SAI Lossy queue, shared buffer dynamic allocation
@@ -366,9 +399,14 @@ def testQosSaiLossyQueue(self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
testParams["cell_size"] = qosConfig["lossy_queue_1"]["cell_size"]
if "pkts_num_margin" in qosConfig["lossy_queue_1"].keys():
testParams["pkts_num_margin"] = qosConfig["lossy_queue_1"]["pkts_num_margin"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.LossyQueueTest", testParams=testParams)
-
- def testQosSaiDscpQueueMapping(self, ptfhost, dutTestParams, dutConfig):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.LossyQueueTest",
+ testParams=testParams
+ )
+
+ def testQosSaiDscpQueueMapping(
+ self, ptfhost, dutTestParams, dutConfig
+ ):
"""
Test QoS SAI DSCP to queue mapping
@@ -392,9 +430,14 @@ def testQosSaiDscpQueueMapping(self, ptfhost, dutTestParams, dutConfig):
"src_port_id": dutConfig["testPorts"]["src_port_id"],
"src_port_ip": dutConfig["testPorts"]["src_port_ip"],
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.DscpMappingPB", testParams=testParams)
-
- def testQosSaiDwrr(self, ptfhost, dutTestParams, dutConfig, dutQosConfig):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.DscpMappingPB",
+ testParams=testParams
+ )
+
+ def testQosSaiDwrr(
+ self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ ):
"""
Test QoS SAI DWRR
@@ -432,11 +475,15 @@ def testQosSaiDwrr(self, ptfhost, dutTestParams, dutConfig, dutQosConfig):
"limit": qosConfig["wrr"]["limit"],
"pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"],
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams
+ )
@pytest.mark.parametrize("pgProfile", ["wm_pg_shared_lossless", "wm_pg_shared_lossy"])
- def testQosSaiPgSharedWatermark(self, pgProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
- resetWatermark):
+ def testQosSaiPgSharedWatermark(
+ self, pgProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ resetWatermark
+ ):
"""
Test QoS SAI PG shared watermark test for lossless/lossy traffic
@@ -483,9 +530,14 @@ def testQosSaiPgSharedWatermark(self, pgProfile, ptfhost, dutTestParams, dutConf
})
if "packet_size" in qosConfig[pgProfile].keys():
testParams["packet_size"] = qosConfig[pgProfile]["packet_size"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.PGSharedWatermarkTest", testParams=testParams)
-
- def testQosSaiPgHeadroomWatermark(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.PGSharedWatermarkTest",
+ testParams=testParams
+ )
+
+ def testQosSaiPgHeadroomWatermark(
+ self, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark,
+ ):
"""
Test QoS SAI PG headroom watermark test
@@ -523,10 +575,16 @@ def testQosSaiPgHeadroomWatermark(self, ptfhost, dutTestParams, dutConfig, dutQo
})
if "pkts_num_margin" in qosConfig["wm_pg_headroom"].keys():
testParams["pkts_num_margin"] = qosConfig["wm_pg_headroom"]["pkts_num_margin"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.PGHeadroomWatermarkTest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.PGHeadroomWatermarkTest",
+ testParams=testParams
+ )
@pytest.mark.parametrize("queueProfile", ["wm_q_shared_lossless", "wm_q_shared_lossy"])
- def testQosSaiQSharedWatermark(self, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig, resetWatermark):
+ def testQosSaiQSharedWatermark(
+ self, queueProfile, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ resetWatermark
+ ):
"""
Test QoS SAI Queue shared watermark test for lossless/lossy traffic
@@ -568,9 +626,14 @@ def testQosSaiQSharedWatermark(self, queueProfile, ptfhost, dutTestParams, dutCo
})
if "packet_size" in qosConfig[queueProfile].keys():
testParams["packet_size"] = qosConfig[queueProfile]["packet_size"]
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.QSharedWatermarkTest", testParams=testParams)
-
- def testQosSaiDscpToPgMapping(self, request, ptfhost, dutTestParams, dutConfig):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.QSharedWatermarkTest",
+ testParams=testParams
+ )
+
+ def testQosSaiDscpToPgMapping(
+ self, request, ptfhost, dutTestParams, dutConfig,
+ ):
"""
Test QoS SAI DSCP to PG mapping ptf test
@@ -598,9 +661,15 @@ def testQosSaiDscpToPgMapping(self, request, ptfhost, dutTestParams, dutConfig):
"src_port_id": dutConfig["testPorts"]["src_port_id"],
"src_port_ip": dutConfig["testPorts"]["src_port_ip"],
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.DscpToPgMapping", testParams=testParams)
-
- def testQosSaiDwrrWeightChange(self, ptfhost, dutTestParams, dutConfig, dutQosConfig, updateSchedProfile):
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.DscpToPgMapping",
+ testParams=testParams
+ )
+
+ def testQosSaiDwrrWeightChange(
+ self, ptfhost, dutTestParams, dutConfig, dutQosConfig,
+ updateSchedProfile
+ ):
"""
Test QoS SAI DWRR runtime weight change
@@ -642,4 +711,6 @@ def testQosSaiDwrrWeightChange(self, ptfhost, dutTestParams, dutConfig, dutQosCo
"limit": qosConfig["wrr_chg"]["limit"],
"pkts_num_leak_out": qosConfig[portSpeedCableLength]["pkts_num_leak_out"],
})
- self.runPtfTest(ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams)
+ self.runPtfTest(
+ ptfhost, testCase="sai_qos_tests.WRRtest", testParams=testParams
+ )
diff --git a/tests/route/test_default_route.py b/tests/route/test_default_route.py
index 3a21568517f..4c8aef56945 100644
--- a/tests/route/test_default_route.py
+++ b/tests/route/test_default_route.py
@@ -16,7 +16,7 @@ def test_default_route_set_src(duthosts, enum_rand_one_per_hwsku_frontend_hostna
"""
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
- asichost = duthost.get_asic(enum_asic_index)
+ asichost = duthost.asic_instance(enum_asic_index)
config_facts = asichost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
@@ -52,7 +52,7 @@ def test_default_ipv6_route_next_hop_global_address(duthosts, enum_rand_one_per_
"""
duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
- asichost = duthost.get_asic(enum_asic_index)
+ asichost = duthost.asic_instance(enum_asic_index)
rtinfo = asichost.get_ip_route_info(ipaddress.ip_network(u"::/0"))
pytest_assert(rtinfo['nexthops'] > 0, "cannot find ipv6 nexthop for default route")
diff --git a/tests/route/test_route_perf.py b/tests/route/test_route_perf.py
index 2f9b3ba38ec..c3d0bc82ad5 100644
--- a/tests/route/test_route_perf.py
+++ b/tests/route/test_route_perf.py
@@ -20,7 +20,7 @@
ROUTE_TABLE_NAME = 'ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY'
@pytest.fixture(autouse=True)
-def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
+def ignore_expected_loganalyzer_exceptions(enum_rand_one_per_hwsku_frontend_hostname, loganalyzer):
"""
Ignore expected failures logs during test execution.
@@ -38,7 +38,7 @@ def ignore_expected_loganalyzer_exceptions(rand_one_dut_hostname, loganalyzer):
]
if loganalyzer:
# Skip if loganalyzer is disabled
- loganalyzer[rand_one_dut_hostname].ignore_regex.extend(ignoreRegex)
+ loganalyzer[enum_rand_one_per_hwsku_frontend_hostname].ignore_regex.extend(ignoreRegex)
@pytest.fixture(params=[4, 6])
def ip_versions(request):
@@ -126,12 +126,12 @@ def generate_route_file(duthost, prefixes, str_intf_nexthop, dir, op):
route_data.append(route_command)
# Copy json file to DUT
- duthost.copy(content=json.dumps(route_data, indent=4), dest=dir)
+ duthost.copy(content=json.dumps(route_data, indent=4), dest=dir, verbose=False)
def count_routes(host):
num = host.shell(
'sonic-db-cli ASIC_DB eval "return #redis.call(\'keys\', \'{}*\')" 0'.format(ROUTE_TABLE_NAME),
- module_ignore_errors=True)['stdout']
+ module_ignore_errors=True, verbose=True)['stdout']
return int(num)
def exec_routes(duthost, prefixes, str_intf_nexthop, op):
@@ -174,7 +174,8 @@ def _check_num_routes(expected_num_routes):
end_time = datetime.now()
# Check route entries are correct
- asic_route_keys = duthost.shell('sonic-db-cli ASIC_DB eval "return redis.call(\'keys\', \'{}*\')" 0'.format(ROUTE_TABLE_NAME))['stdout_lines']
+ asic_route_keys = duthost.shell('sonic-db-cli ASIC_DB eval "return redis.call(\'keys\', \'{}*\')" 0'\
+ .format(ROUTE_TABLE_NAME), verbose=False)['stdout_lines']
asic_prefixes = []
for key in asic_route_keys:
json_obj = key[len(ROUTE_TABLE_NAME) + 1 : ]
@@ -189,8 +190,8 @@ def _check_num_routes(expected_num_routes):
# Retuen time used for set/del routes
return (end_time - start_time).total_seconds()
-def test_perf_add_remove_routes(duthosts, rand_one_dut_hostname, request, ip_versions):
- duthost = duthosts[rand_one_dut_hostname]
+def test_perf_add_remove_routes(duthosts, enum_rand_one_per_hwsku_frontend_hostname, request, ip_versions):
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
# Number of routes for test
set_num_routes = request.config.getoption("--num_routes")
diff --git a/tests/run_tests.sh b/tests/run_tests.sh
index 692fb2613ed..13728441a42 100755
--- a/tests/run_tests.sh
+++ b/tests/run_tests.sh
@@ -90,6 +90,8 @@ function setup_environment()
export ANSIBLE_CONFIG=${BASE_PATH}/ansible
export ANSIBLE_LIBRARY=${BASE_PATH}/ansible/library/
export ANSIBLE_CONNECTION_PLUGINS=${BASE_PATH}/ansible/plugins/connection
+
+ rm -fr ${BASE_PATH}/tests/_cache
}
function setup_test_options()
diff --git a/tests/saitests/sai_qos_tests.py b/tests/saitests/sai_qos_tests.py
index 42cf12cd8cf..879e787e927 100644
--- a/tests/saitests/sai_qos_tests.py
+++ b/tests/saitests/sai_qos_tests.py
@@ -1,7 +1,6 @@
"""
SONiC Dataplane Qos tests
"""
-
import time
import logging
import ptf.packet as scapy
@@ -15,7 +14,8 @@
simple_arp_packet,
send_packet,
simple_tcp_packet,
- simple_qinq_tcp_packet)
+ simple_qinq_tcp_packet,
+ simple_ip_packet)
from ptf.mask import Mask
from switch import (switch_init,
sai_thrift_create_scheduler_profile,
@@ -60,6 +60,41 @@
ECN_INDEX_IN_HEADER = 53 # Fits the ptf hex_dump_buffer() parse function
DSCP_INDEX_IN_HEADER = 52 # Fits the ptf hex_dump_buffer() parse function
+def get_rx_port(dp, device_number, src_port_id, dst_mac, dst_ip, src_ip):
+ ip_id = 0xBABE
+ tos = (0 << 2) | 1
+ src_port_mac = dp.dataplane.get_mac(device_number, src_port_id)
+ pkt = simple_ip_packet(pktlen=64,
+ eth_dst=dst_mac,
+ eth_src=src_port_mac,
+ ip_src=src_ip,
+ ip_dst=dst_ip,
+ ip_tos=tos,
+ ip_id=ip_id)
+
+ send_packet(dp, src_port_id, pkt, 1)
+
+ exp_pkt = simple_ip_packet(pktlen=48,
+ eth_dst=dst_mac,
+ eth_src=src_port_mac,
+ ip_src=src_ip,
+ ip_dst=dst_ip,
+ ip_tos=tos,
+ ip_id=ip_id)
+
+ masked_exp_pkt = Mask(exp_pkt, ignore_extra_bytes=True)
+ masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
+ masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
+ masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "chksum")
+ masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
+ masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "len")
+
+ result = dp.dataplane.poll(device_number=0, exp_pkt=masked_exp_pkt, timeout=3)
+ if isinstance(result, dp.dataplane.PollFailure):
+ dp.fail("Expected packet was not received. Received on port:{} {}".format(result.port, result.format()))
+
+ return result.port
+
class ARPpopulate(sai_base_test.ThriftInterfaceDataPlane):
def setUp(self):
@@ -154,22 +189,33 @@ def runTest(self):
# DSCP to queue mapping
class DscpMappingPB(sai_base_test.ThriftInterfaceDataPlane):
+
def runTest(self):
switch_init(self.client)
- router_mac = self.test_params['router_mac']
+ router_mac = self.test_params['router_mac']
dst_port_id = int(self.test_params['dst_port_id'])
dst_port_ip = self.test_params['dst_port_ip']
dst_port_mac = self.dataplane.get_mac(0, dst_port_id)
src_port_id = int(self.test_params['src_port_id'])
src_port_ip = self.test_params['src_port_ip']
src_port_mac = self.dataplane.get_mac(0, src_port_id)
- print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id)
- print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip)
exp_ip_id = 101
exp_ttl = 63
+ pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac
+ print >> sys.stderr, "dst_port_id: %d, src_port_id: %d" % (dst_port_id, src_port_id)
+ # in case dst_port_id is part of LAG, find out the actual dst port
+ # for given IP parameters
+ dst_port_id = get_rx_port(
+ self, 0, src_port_id, pkt_dst_mac, dst_port_ip, src_port_ip
+ )
+ print >> sys.stderr, "actual dst_port_id: %d" % (dst_port_id)
+ print >> sys.stderr, "dst_port_mac: %s, src_port_mac: %s, src_port_ip: %s, dst_port_ip: %s" % (dst_port_mac, src_port_mac, src_port_ip, dst_port_ip)
+ print >> sys.stderr, "port list {}".format(port_list)
# Get a snapshot of counter values
+
+ time.sleep(10)
# port_results is not of our interest here
port_results, queue_results_base = sai_thrift_read_port_counters(self.client, port_list[dst_port_id])
@@ -178,8 +224,8 @@ def runTest(self):
for dscp in range(0, 64):
tos = (dscp << 2)
tos |= 1
- pkt = simple_tcp_packet(pktlen=64,
- eth_dst=router_mac if router_mac != '' else dst_port_mac,
+ pkt = simple_ip_packet(pktlen=64,
+ eth_dst=pkt_dst_mac,
eth_src=src_port_mac,
ip_src=src_port_ip,
ip_dst=dst_port_ip,
@@ -222,7 +268,8 @@ def runTest(self):
# dscp 48 -> queue 6
# So for the 64 pkts sent the mapping should be -> 58 queue 1, and 1 for queue0, queue2, queue3, queue4, queue5, and queue6
# Check results
- assert(queue_results[QUEUE_0] == 1 + queue_results_base[QUEUE_0])
+ # LAG ports can have LACP packets on queue 0, hence using >= comparison
+ assert(queue_results[QUEUE_0] >= 1 + queue_results_base[QUEUE_0])
assert(queue_results[QUEUE_1] == 58 + queue_results_base[QUEUE_1])
assert(queue_results[QUEUE_2] == 1 + queue_results_base[QUEUE_2])
assert(queue_results[QUEUE_3] == 1 + queue_results_base[QUEUE_3])
@@ -559,7 +606,7 @@ def runTest(self):
dst_port_ip = self.test_params['dst_port_ip']
dst_port_mac = self.dataplane.get_mac(0, dst_port_id)
max_buffer_size = int(self.test_params['buffer_max_size'])
- max_queue_size = int(self.test_params['queue_max_size'])
+ max_queue_size = int(self.test_params['queue_max_size'])
src_port_id = int(self.test_params['src_port_id'])
src_port_ip = self.test_params['src_port_ip']
src_port_mac = self.dataplane.get_mac(0, src_port_id)
@@ -568,18 +615,31 @@ def runTest(self):
pkts_num_trig_pfc = int(self.test_params['pkts_num_trig_pfc'])
pkts_num_trig_ingr_drp = int(self.test_params['pkts_num_trig_ingr_drp'])
+ pkt_dst_mac = router_mac if router_mac != '' else dst_port_mac
+
# Prepare TCP packet data
tos = dscp << 2
tos |= ecn
ttl = 64
default_packet_length = 64
- pkt = simple_tcp_packet(pktlen=default_packet_length,
- eth_dst=router_mac if router_mac != '' else dst_port_mac,
+ pkt = simple_ip_packet(pktlen=default_packet_length,
+ eth_dst=pkt_dst_mac,
eth_src=src_port_mac,
ip_src=src_port_ip,
ip_dst=dst_port_ip,
ip_tos=tos,
ip_ttl=ttl)
+
+ print >> sys.stderr, "test dst_port_id: {}, src_port_id: {}".format(
+ dst_port_id, src_port_id
+ )
+ # in case dst_port_id is part of LAG, find out the actual dst port
+ # for given IP parameters
+ dst_port_id = get_rx_port(
+ self, 0, src_port_id, pkt_dst_mac, dst_port_ip, src_port_ip
+ )
+ print >> sys.stderr, "actual dst_port_id: {}".format(dst_port_id)
+
# get a snapshot of counter values at recv and transmit ports
# queue_counters value is not of our interest here
recv_counters_base, queue_counters = sai_thrift_read_port_counters(self.client, port_list[src_port_id])
@@ -876,7 +936,7 @@ def setUp(self):
if self.pkts_num_trig_pfc:
print >> sys.stderr, ("pkts num: leak_out: %d, trig_pfc: %d, hdrm_full: %d, hdrm_partial: %d, pkt_size %d" % (self.pkts_num_leak_out, self.pkts_num_trig_pfc, self.pkts_num_hdrm_full, self.pkts_num_hdrm_partial, self.pkt_size))
elif self.pkts_num_trig_pfc_shp:
- print >> sys.stderr, ("pkts num: leak_out: {}, trig_pfc: {}, hdrm_full: {}, hdrm_partial: {}, pkt_size {}".format(self.pkts_num_leak_out, self.pkts_num_trig_pfc_shp, self.pkts_num_hdrm_full, self.pkts_num_hdrm_partial, self.pkt_size))
+ print >> sys.stderr, ("pkts num: leak_out: {}, trig_pfc: {}, hdrm_full: {}, hdrm_partial: {}, pkt_size {}".format(self.pkts_num_leak_out, self.pkts_num_trig_pfc_shp, self.pkts_num_hdrm_full, self.pkts_num_hdrm_partial, self.pkt_size))
# used only for headroom pool watermark
if all(key in self.test_params for key in ['hdrm_pool_wm_multiplier', 'buf_pool_roid', 'cell_size', 'max_headroom']):
@@ -1222,11 +1282,11 @@ def runTest(self):
class WRRtest(sai_base_test.ThriftInterfaceDataPlane):
def runTest(self):
- switch_init(self.client)
+ switch_init(self.client)
# Parse input parameters
ecn = int(self.test_params['ecn'])
- router_mac = self.test_params['router_mac']
+ router_mac = self.test_params['router_mac']
dst_port_id = int(self.test_params['dst_port_id'])
dst_port_ip = self.test_params['dst_port_ip']
dst_port_mac = self.dataplane.get_mac(0, dst_port_id)
diff --git a/tests/scripts/garp_service.py b/tests/scripts/garp_service.py
new file mode 100644
index 00000000000..3a52fd6730d
--- /dev/null
+++ b/tests/scripts/garp_service.py
@@ -0,0 +1,79 @@
+import argparse
+import json
+import ptf
+import ptf.testutils as testutils
+import time
+
+from ipaddress import ip_interface
+from scapy.all import conf
+from scapy.arch import get_if_hwaddr
+
+class GarpService:
+
+ def __init__(self, garp_config_file, interval):
+ self.garp_config_file = garp_config_file
+ self.interval = interval
+ self.packets = {}
+ self.dataplane = ptf.dataplane_instance
+
+ def gen_garp_packets(self):
+ '''
+ Read the config file and generate a GARP packet for each configured interface
+ '''
+
+ with open(self.garp_config_file) as f:
+ garp_config = json.load(f)
+
+ for port, config in garp_config.items():
+ intf_name = 'eth{}'.format(port)
+ source_mac = get_if_hwaddr(intf_name)
+ source_ip_str = config['target_ip']
+ source_ip = str(ip_interface(source_ip_str).ip)
+
+ # PTF uses Scapy to create packets, so this is ok to create
+ # packets through PTF even though we are using Scapy to send the packets
+ garp_pkt = testutils.simple_arp_packet(eth_src=source_mac,
+ hw_snd=source_mac,
+ ip_snd=source_ip,
+ ip_tgt=source_ip, # Re-use server IP as target IP, since it is within the subnet of the VLAN IP
+ arp_op=2)
+ self.packets[intf_name] = garp_pkt
+
+ def send_garp_packets(self):
+ '''
+ For each created GARP packet/interface pair, create an L2 socket.
+ Then send every packet through its associated socket according to the self.interval
+ '''
+ self.gen_garp_packets()
+
+ sockets = {}
+
+ for intf, packet in self.packets.items():
+ socket = conf.L2socket(iface=intf)
+ sockets[socket] = packet
+
+ try:
+ while True:
+ for socket, packet in sockets.items():
+ socket.send(packet)
+
+ if self.interval is None:
+ break
+
+ time.sleep(self.interval)
+
+ finally:
+ for socket in sockets.keys():
+ socket.close()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='GARP Service')
+ parser.add_argument('--conf', '-c', dest='conf_file', required=False, default='/tmp/garp_conf.json', action='store', help='The configuration file for GARP Service (default "/tmp/garp_conf.json")')
+ parser.add_argument('--interval', '-i', dest='interval', required=False, type=int, default=None, action='store', help='The interval at which to re-send GARP messages. If None or not specified, messages will only be set once at service startup')
+ args = parser.parse_args()
+ conf_file = args.conf_file
+ interval = args.interval
+
+ garp_service = GarpService(conf_file, interval)
+ garp_service.send_garp_packets()
diff --git a/tests/scripts/getbuild.py b/tests/scripts/getbuild.py
new file mode 100755
index 00000000000..ff0eed2d8b7
--- /dev/null
+++ b/tests/scripts/getbuild.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+
+import json
+import time
+import sys
+import argparse
+from urllib.request import urlopen, urlretrieve
+
+_start_time = None
+_last_time = None
+artifact_size = 0
+def reporthook(count, block_size, total_size):
+ global _start_time, _last_time, artifact_size
+ cur_time = int(time.time())
+ if count == 0:
+ _start_time = cur_time
+ _last_time = cur_time
+ return
+
+ if cur_time == _last_time:
+ return
+
+ _last_time = cur_time
+
+ duration = cur_time - _start_time
+ progress_size = int(count * block_size)
+ speed = int(progress_size / (1024 * duration))
+ if total_size < 0 and artifact_size > 0:
+ total_size = artifact_size
+ if total_size > 0:
+ percent = int(count * block_size * 100 / total_size)
+ time_left = (total_size - progress_size) / speed / 1024
+ sys.stdout.write("\r...%d%%, %d(%d) MB, %d KB/s, %d seconds left..." %
+ (percent, progress_size / (1024 * 1024), total_size / (1024 * 1024), speed, time_left))
+ else:
+ sys.stdout.write("\r...%d MB, %d KB/s, ..." %
+ (progress_size / (1024 * 1024), speed))
+ sys.stdout.flush()
+
+def validate_url_or_abort(url):
+ # Attempt to retrieve HTTP response code
+ try:
+ urlfile = urlopen(url)
+ response_code = urlfile.getcode()
+ urlfile.close()
+ except IOError:
+ response_code = None
+
+ if not response_code:
+ print("Did not receive a response from remote machine. Aborting...")
+ sys.exit(1)
+ else:
+ # Check for a 4xx response code which indicates a nonexistent URL
+ if response_code / 100 == 4:
+ print("Image file not found on remote machine. Aborting...")
+ sys.exit(1)
+
+def get_download_url(buildid, artifact_name):
+ """get download url"""
+
+ artifact_url = "https://dev.azure.com/mssonic/build/_apis/build/builds/{}/artifacts?artifactName={}&api-version=5.0".format(buildid, artifact_name)
+
+ resp = urlopen(artifact_url)
+
+ j = json.loads(resp.read().decode('utf-8'))
+
+ download_url = j['resource']['downloadUrl']
+ artifact_size = int(j['resource']['properties']['artifactsize'])
+
+ return (download_url, artifact_size)
+
+
+def download_artifacts(url, content_type, platform, buildid):
+ """find latest successful build id for a branch"""
+
+ if content_type == 'image':
+ if platform == 'kvm':
+ filename = 'sonic-vs.img.gz'
+ else:
+ filename = "sonic-{}.bin".format(platform)
+
+ url = url.replace('zip', 'file')
+ url += "&subPath=%2Ftarget%2F{}".format(filename)
+ else:
+ filename = "{}.zip".format(platform)
+
+ if url.startswith('http://') or url.startswith('https://'):
+ print('Downloading {} from build {}...'.format(filename, buildid))
+ validate_url_or_abort(url)
+ try:
+ urlretrieve(url, filename, reporthook)
+ except Exception as e:
+ print("Download error", e)
+ sys.exit(1)
+
+def find_latest_build_id(branch):
+ """find latest successful build id for a branch"""
+
+ builds_url = "https://dev.azure.com/mssonic/build/_apis/build/builds?definitions=1&branchName=refs/heads/{}&resultFilter=succeeded&statusFilter=completed&api-version=6.0".format(branch)
+
+ resp = urlopen(builds_url)
+
+ j = json.loads(resp.read().decode('utf-8'))
+
+ latest_build_id = int(j['value'][0]['id'])
+
+ return latest_build_id
+
+def main():
+ global artifact_size
+
+ parser = argparse.ArgumentParser(description='Download artifacts from sonic azure devops.')
+ parser.add_argument('--buildid', metavar='buildid', type=int, help='build id')
+ parser.add_argument('--branch', metavar='branch', type=str, help='branch name')
+ parser.add_argument('--platform', metavar='platform', type=str,
+ choices=['broadcom', 'mellanox', 'kvm'],
+ help='platform to download')
+ parser.add_argument('--content', metavar='content', type=str,
+ choices=['all', 'image'], default='image',
+ help='download content type [all|image(default)]')
+ args = parser.parse_args()
+
+ if args.buildid is None:
+ buildid = find_latest_build_id(args.branch)
+ else:
+ buildid = int(args.buildid)
+
+ artifact_name = "sonic-buildimage.{}".format(args.platform)
+
+ (dl_url, artifact_size) = get_download_url(buildid, artifact_name)
+
+ download_artifacts(dl_url, args.content, args.platform, buildid)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/scripts/icmp_responder.py b/tests/scripts/icmp_responder.py
index 5dca3cf4c80..d68f369f5ee 100644
--- a/tests/scripts/icmp_responder.py
+++ b/tests/scripts/icmp_responder.py
@@ -52,7 +52,7 @@ def __call__(self):
for s in sel[0]:
packet = s.recv()
if packet is not None:
- if packet[ICMP].type == self.TYPE_ECHO_REQUEST and self.request_handler:
+ if ICMP in packet and packet[ICMP].type == self.TYPE_ECHO_REQUEST and self.request_handler:
self.request_handler(s, packet, self.dst_mac)
finally:
for s in self.sniff_sockets:
diff --git a/tests/show_techsupport/tech_support_cmds.py b/tests/show_techsupport/tech_support_cmds.py
new file mode 100644
index 00000000000..cdaccb39640
--- /dev/null
+++ b/tests/show_techsupport/tech_support_cmds.py
@@ -0,0 +1,208 @@
+import re
+
+ignore_list = {
+ "cp_proc_files": {},
+}
+
+copy_proc_files = [
+ "/proc/buddyinfo",
+ "/proc/cmdline",
+ "/proc/consoles",
+ "/proc/cpuinfo",
+ "/proc/devices",
+ "/proc/diskstats",
+ "/proc/dma",
+ "/proc/interrupts",
+ "/proc/iomem",
+ "/proc/ioports",
+ "/proc/kallsyms",
+ "/proc/loadavg",
+ "/proc/locks",
+ "/proc/meminfo",
+ "/proc/misc",
+ "/proc/modules",
+ "/proc/self/mounts",
+ "/proc/self/net",
+ "/proc/pagetypeinfo",
+ "/proc/partitions",
+ "/proc/sched_debug",
+ "/proc/slabinfo",
+ "/proc/softirqs",
+ "/proc/stat",
+ "/proc/swaps",
+ "/proc/sysvipc",
+ "/proc/timer_list",
+ "/proc/uptime",
+ "/proc/version",
+ "/proc/vmallocinfo",
+ "/proc/vmstat",
+ "/proc/zoneinfo",
+]
+
+show_platform_cmds = [
+ "show platform syseeprom",
+ "show platform psustatus",
+ "show platform ssdhealth",
+ "show platform temperature",
+ "show platform fan",
+ "show platform summary",
+]
+
+ip_cmds = [
+ "ip link",
+ "ip addr",
+ "ip rule",
+ "ip route show table all",
+ "ip neigh",
+ "ip -s neigh show nud noarp",
+]
+
+bridge_cmds = [
+ "bridge fdb show",
+ "bridge vlan show",
+]
+
+frr_cmds = [
+ "vtysh{} -c 'show running-config'",
+ "vtysh{} -c 'show ip route vrf all'",
+ "vtysh{} -c 'show ipv6 route vrf all'",
+ "vtysh{} -c 'show zebra fpm stats'",
+ "vtysh{} -c 'show zebra dplane detailed'",
+ "vtysh{} -c 'show interface vrf all'",
+ "vtysh{} -c 'show zebra client summary'",
+]
+
+
+bgp_cmds = [
+ "vtysh{} -c 'show ip bgp summary'",
+ "vtysh{} -c 'show ip bgp neighbors'",
+ "vtysh{} -c 'show ip bgp'",
+ "vtysh{} -c 'show bgp ipv6 summary'",
+ "vtysh{} -c 'show bgp ipv6 neighbors'",
+ "vtysh{} -c 'show bgp ipv6'",
+ re.compile('vtysh{}\s+-c "show ip bgp neighbors .* advertised-routes"'),
+ re.compile('vtysh{}\s+-c "show ip bgp neighbors .* routes"'),
+ re.compile('vtysh{}\s+-c "show bgp ipv6 neighbors .* advertised-routes"'),
+ re.compile('vtysh{}\s+-c "show bgp ipv6 neighbors .* routes"'),
+]
+
+nat_cmds = [
+ "iptables -t nat -nv -L",
+ "conntrack -j -L",
+ "conntrack -j -L | wc",
+ "conntrack -L",
+ "conntrack -L | wc",
+ "show nat config",
+]
+
+bfd_cmds = [
+ "vtysh{} -c 'show bfd peers'",
+ "vtysh{} -c 'show bfd peers counters'",
+ "vtysh{} -c 'show bfd peers json'",
+ "vtysh{} -c 'show bfd peers counters json'",
+]
+
+redis_db_cmds = [
+ "{}sonic-db-dump -n 'APPL_DB' -y",
+ "{}sonic-db-dump -n 'ASIC_DB' -y",
+ "{}sonic-db-dump -n 'COUNTERS_DB' -y",
+ "{}sonic-db-dump -n 'CONFIG_DB' -y",
+ "{}sonic-db-dump -n 'FLEX_COUNTER_DB' -y",
+ "{}sonic-db-dump -n 'STATE_DB' -y",
+ "{}sonic-db-dump -n 'COUNTERS_DB' -y",
+]
+
+docker_cmds = [
+ "docker exec -it syncd{} saidump",
+ "docker stats --no-stream",
+ "docker ps -a",
+ "docker top pmon",
+ "docker exec -it lldp{} lldpcli show statistics",
+ "docker logs bgp{}",
+ "docker logs swss{}",
+]
+
+misc_show_cmds = [
+ "show services",
+ "show reboot-cause",
+ "show vlan brief",
+ "show version",
+ "show interface status -d all",
+ "show interface transceiver presence",
+ "show interface transceiver eeprom --dom",
+ "show ip interface",
+ "show interface counters",
+ "{}show queue counters",
+ "{}netstat -i",
+ "{}ifconfig -a",
+]
+
+misc_cmds = [
+ "systemd-analyze blame",
+ "systemd-analyze dump",
+ "systemd-analyze plot",
+ "sensors",
+ "lspci -vvv -xx",
+ "lsusb -v",
+ "sysctl -a",
+ "lldpctl",
+ "ps aux",
+ "top -b -n 1",
+ "free",
+ "vmstat 1 5",
+ "vmstat -m",
+ "vmstat -s",
+ "mount",
+ "df",
+ "dmesg",
+ "cat /host/machine.conf",
+ "cp -r /etc",
+]
+
+copy_config_cmds = [
+ "cp .{}/buffers.json.j2",
+ "cp .{}/buffers_defaults",
+ "cp .{}/pg_profile_lookup.ini",
+ "cp .{}/port_config.ini",
+ "cp .{}/qos.json.j2",
+ "cp .{}/sai.profile",
+]
+
+broadcom_cmd_bcmcmd = [
+ 'bcmcmd{} -t5 version',
+ 'bcmcmd{} -t5 soc',
+ 'bcmcmd{} -t5 ps',
+ 'bcmcmd{} "l3 nat_ingress show"',
+ 'bcmcmd{} "l3 nat_egress show"',
+ 'bcmcmd{} "ipmc table show"',
+ 'bcmcmd{} "multicast show"',
+ 'bcmcmd{} "conf show"',
+ 'bcmcmd{} "fp show"',
+ 'bcmcmd{} "pvlan show"',
+ 'bcmcmd{} "l2 show"',
+ 'bcmcmd{} "l3 intf show"',
+ 'bcmcmd{} "l3 defip show"',
+ 'bcmcmd{} "l3 l3table show"',
+ 'bcmcmd{} "l3 egress show"',
+ 'bcmcmd{} "l3 ecmp egress show"',
+ 'bcmcmd{} "l3 multipath show"',
+ 'bcmcmd{} "l3 ip6host show"',
+ 'bcmcmd{} "l3 ip6route show"',
+ 'bcmcmd{} "mc show"',
+ 'bcmcmd{} "cstat *"',
+ 'bcmcmd{} "mirror show"',
+ 'bcmcmd{} "mirror dest show"',
+ 'bcmcmd{} "port *"',
+ 'bcmcmd{} "d chg my_station_tcam"',
+]
+
+broadcom_cmd_misc = [
+ "cat /proc/bcm/knet/debug",
+ "cat /proc/bcm/knet/dma",
+ "cat /proc/bcm/knet/link",
+ "cat /proc/bcm/knet/rate",
+ "cat /proc/bcm/knet/dstats",
+ "cat /proc/bcm/knet/stats",
+ "docker cp syncd{}:/var/log/bcm_diag_post",
+ "docker cp syncd{}:/var/log/diagrun.log",
+]
diff --git a/tests/show_techsupport/test_techsupport.py b/tests/show_techsupport/test_techsupport.py
index 79f3e99e9af..9a81b8b0976 100644
--- a/tests/show_techsupport/test_techsupport.py
+++ b/tests/show_techsupport/test_techsupport.py
@@ -1,12 +1,19 @@
-import pytest
import os
import pprint
-from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
+import pytest
import time
+
+import logging
+
from random import randint
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.plugins.loganalyzer.loganalyzer import LogAnalyzer, LogAnalyzerError
from tests.common.utilities import wait_until
+
from log_messages import *
-import logging
+
+import tech_support_cmds as cmds
+
logger = logging.getLogger(__name__)
pytestmark = [
@@ -240,7 +247,7 @@ def teardown_mirroring(dut, tmp_path):
dut.command('config mirror_session remove {}'.format(SESSION_INFO['name']))
-@pytest.fixture(scope='function', params=['acl', 'mirroring'], autouse=True)
+@pytest.fixture(scope='function', params=['acl', 'mirroring'])
def config(request):
"""
fixture to add configurations on setup by received parameters.
@@ -282,3 +289,152 @@ def test_techsupport(request, config, duthosts, enum_rand_one_per_hwsku_frontend
stdout = duthost.command("rm -rf {}".format(tar_file))
logger.debug("Sleeping for {} seconds".format(loop_delay))
time.sleep(loop_delay)
+
+
+def add_asic_arg(format_str, cmds_list, asic_num):
+ """
+ Add ASIC specific arg using the supplied string formatter
+
+ New commands are added for each ASIC. In case of a regex
+    parameter, new regex is created for each ASIC.
+ """
+ updated_cmds = []
+ for cmd in cmds_list:
+ if isinstance(cmd, str):
+ if "{}" in cmd:
+ if asic_num == 1:
+ updated_cmds.append(cmd.format(""))
+ else:
+ for asic in range(0, asic_num):
+ asic_arg = format_str.format(asic)
+ updated_cmds.append(cmd.format(asic_arg))
+ else:
+ updated_cmds.append(cmd)
+ else:
+ if "{}" in cmd.pattern:
+ if asic_num == 1:
+ mod_pattern = cmd.pattern.format("")
+ updated_cmds.append(re.compile(mod_pattern))
+ else:
+ for asic in range(0, asic_num):
+ asic_arg = format_str.format(asic)
+ mod_pattern = cmd.pattern.format(asic_arg)
+ updated_cmds.append(re.compile(mod_pattern))
+ else:
+ updated_cmds.append(cmd)
+ return updated_cmds
+
+
+@pytest.fixture(scope='function')
+def commands_to_check(duthosts, enum_rand_one_per_hwsku_frontend_hostname):
+ """
+ Prepare a list of commands to be expected in the
+ show techsupport output. All the expected commands are
+ categorized into groups.
+
+ For multi ASIC platforms, command strings are generated based on
+ the number of ASICs.
+
+ Also adds hardware specific commands
+
+ Returns:
+ A dict of command groups with each group containing a list of commands
+ """
+
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+ num = duthost.num_asics()
+
+ cmds_to_check = {
+ "cp_proc_files": cmds.copy_proc_files,
+ "show_platform_cmds": cmds.show_platform_cmds,
+ "ip_cmds": cmds.ip_cmds,
+ "bridge_cmds": cmds.bridge_cmds,
+ "frr_cmds": add_asic_arg(" -n {}", cmds.frr_cmds, num),
+ "bgp_cmds": add_asic_arg(" -n {}", cmds.bgp_cmds, num),
+ "nat_cmds": cmds.nat_cmds,
+ "bfd_cmds": add_asic_arg(" -n {}", cmds.bfd_cmds, num),
+ "redis_db_cmds": add_asic_arg("asic{} ", cmds.redis_db_cmds, num),
+ "docker_cmds": add_asic_arg("{}", cmds.docker_cmds, num),
+ "misc_show_cmds": add_asic_arg("asic{} ", cmds.misc_show_cmds, num),
+ "misc_cmds": cmds.misc_cmds,
+ }
+
+ if duthost.facts["asic_type"] == "broadcom":
+ cmds_to_check.update(
+ {
+ "broadcom_cmd_bcmcmd":
+ add_asic_arg(" -n {}", cmds.broadcom_cmd_bcmcmd, num),
+ "broadcom_cmd_misc":
+ add_asic_arg("{}", cmds.broadcom_cmd_misc, num),
+ "copy_config_cmds":
+ add_asic_arg("/{}", cmds.copy_config_cmds, num),
+ }
+ )
+
+ return cmds_to_check
+
+
+def check_cmds(cmd_group_name, cmd_group_to_check, cmdlist):
+ """
+ Check commands within a group against the command list
+
+    Returns: list of commands not found
+ """
+
+ cmd_not_found = defaultdict(list)
+ ignore_set = cmds.ignore_list.get(cmd_group_name)
+ for cmd_name in cmd_group_to_check:
+ found = False
+ cmd_str = cmd_name if isinstance(cmd_name, str) else cmd_name.pattern
+ logger.info("Checking for {}".format(cmd_str))
+
+ for command in cmdlist:
+ if isinstance(cmd_name, str):
+ result = cmd_name in command
+ else:
+ result = cmd_name.search(command)
+ if result:
+ found = True
+ break
+
+ if not found:
+ if not ignore_set or cmd_str not in ignore_set:
+ cmd_not_found[cmd_group_name].append(cmd_str)
+
+ return cmd_not_found
+
+
+def test_techsupport_commands(
+ duthosts, enum_rand_one_per_hwsku_frontend_hostname, commands_to_check
+):
+ """
+ This test checks list of commands that will be run when executing
+ 'show techsupport' CLI against a standard expected list of commands
+ to run.
+
+ The test invokes show techsupport with noop option, which just
+ returns the list of commands that will be run when collecting
+ tech support data.
+
+ Args:
+ commands_to_check: contains a dict of command groups with each
+ group containing a list of related commands.
+ """
+
+ cmd_not_found = defaultdict(list)
+ duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]
+
+ stdout = duthost.shell(
+ 'sudo generate_dump -n | grep -v "^mkdir\|^rm\|^tar\|^gzip"'
+ )
+
+ pytest_assert(stdout['rc'] == 0, 'generate_dump command failed')
+
+ cmd_list = stdout["stdout_lines"]
+
+ for cmd_group_name, cmd_group_to_check in commands_to_check.items():
+ cmd_not_found.update(
+ check_cmds(cmd_group_name, cmd_group_to_check, cmd_list)
+ )
+
+ pytest_assert(len(cmd_not_found) == 0, cmd_not_found)
diff --git a/tests/snmp/conftest.py b/tests/snmp/conftest.py
index 93a6c68a75c..96c22a12b01 100644
--- a/tests/snmp/conftest.py
+++ b/tests/snmp/conftest.py
@@ -5,3 +5,14 @@
def setup_check_snmp_ready(duthosts, rand_one_dut_hostname):
duthost = duthosts[rand_one_dut_hostname]
assert wait_until(300, 20, duthost.is_service_fully_started, "snmp"), "SNMP service is not running"
+
+def pytest_addoption(parser):
+ """
+ Adds options to pytest that are used by the snmp tests.
+ """
+ parser.addoption(
+ "--percentage",
+ action="store",
+ default=False,
+ help="Set percentage difference for snmp test",
+ type=int)
diff --git a/tests/snmp/memory.py b/tests/snmp/memory.py
new file mode 100644
index 00000000000..b370d6768a9
--- /dev/null
+++ b/tests/snmp/memory.py
@@ -0,0 +1,3 @@
+#!/usr/bin/python
+load = [' ' * 512000000]
+print(load)
diff --git a/tests/snmp/test_snmp_default_route.py b/tests/snmp/test_snmp_default_route.py
new file mode 100644
index 00000000000..a11c6fe5c6c
--- /dev/null
+++ b/tests/snmp/test_snmp_default_route.py
@@ -0,0 +1,44 @@
+import pytest
+
+pytestmark = [
+ pytest.mark.topology('any'),
+ pytest.mark.device_type('vs')
+]
+
+
+@pytest.mark.bsl
+def test_snmp_default_route(duthosts, enum_dut_hostname, localhost, creds):
+ """compare the snmp facts between observed states and target state"""
+
+ duthost = duthosts[enum_dut_hostname]
+ hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
+ dut_result = duthost.shell('show ip route 0.0.0.0/0 | grep "\*"')
+
+ dut_result_nexthops = []
+ # ipCidrRouteEntry MIB for default route will have entries
+ # where next hop are not eth0 interface.
+ for line in dut_result['stdout_lines']:
+ if 'via' in line:
+ ip, interface = line.split('via')
+ ip = ip.strip("*, ")
+ interface = interface.strip("*, ")
+ if interface != "eth0":
+ dut_result_nexthops.append(ip)
+
+ # If show ip route 0.0.0.0/0 has route only via eth0,
+ # or has no route snmp_facts for ip_cidr_route
+ # will be empty.
+ if len(dut_result_nexthops) == 0:
+ assert 'snmp_cidr_route' not in snmp_facts, 'snmp_cidr_route should not be present in snmp_facts'
+
+ if len(dut_result_nexthops) != 0:
+ # Test to ensure show ip route 0.0.0.0/0 result matches with SNMP result
+ for ip in dut_result_nexthops:
+ assert ip in snmp_facts['snmp_cidr_route'], "{} ip not found in snmp_facts".format(ip)
+ assert snmp_facts['snmp_cidr_route'][ip]['route_dest'] == '0.0.0.0', "Incorrect route_dest for {} ip".format(ip)
+ assert snmp_facts['snmp_cidr_route'][ip]['status'] == '1', "Incorrect status for {} ip".format(ip)
+
+ # Compare the length of routes in CLI output and SNMP facts
+ assert len(snmp_facts['snmp_cidr_route'].keys()) == len(snmp_facts['snmp_cidr_route'].keys()), \
+ "Number or route entries in SNMP does not match with cli"
diff --git a/tests/snmp/test_snmp_interfaces.py b/tests/snmp/test_snmp_interfaces.py
index a05f4030d4e..4d0af396272 100644
--- a/tests/snmp/test_snmp_interfaces.py
+++ b/tests/snmp/test_snmp_interfaces.py
@@ -6,26 +6,39 @@
]
@pytest.mark.bsl
-def test_snmp_interfaces(duthosts, rand_one_dut_hostname, localhost, creds):
- """compare the bgp facts between observed states and target state"""
- duthost = duthosts[rand_one_dut_hostname]
-
+def test_snmp_interfaces(localhost, creds, duthosts, enum_dut_hostname, enum_asic_index):
+ """compare the snmp facts between observed states and target state"""
+ duthost = duthosts[enum_dut_hostname]
hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ namespace = duthost.get_namespace_from_asic_id(enum_asic_index)
+ config_facts = duthost.config_facts(host=duthost.hostname, source="persistent", namespace=namespace)['ansible_facts']
snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
- config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
snmp_ifnames = [ v['name'] for k, v in snmp_facts['snmp_interfaces'].items() ]
print snmp_ifnames
# Verify all physical ports in snmp interface list
for _, alias in config_facts['port_name_to_alias_map'].items():
- assert alias in snmp_ifnames
+ assert alias in snmp_ifnames, "Interface not found in SNMP facts."
# Verify all port channels in snmp interface list
for po_name in config_facts.get('PORTCHANNEL', {}):
- assert po_name in snmp_ifnames
+ assert po_name in snmp_ifnames, "PortChannel not found in SNMP facts."
+
+@pytest.mark.bsl
+def test_snmp_mgmt_interface(localhost, creds, duthosts, enum_dut_hostname):
+ """compare the snmp facts between observed states and target state"""
+
+ duthost = duthosts[enum_dut_hostname]
+ hostip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+
+ snmp_facts = localhost.snmp_facts(host=hostip, version="v2c", community=creds["snmp_rocommunity"])['ansible_facts']
+ config_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
+
+ snmp_ifnames = [ v['name'] for k, v in snmp_facts['snmp_interfaces'].items() ]
+ print snmp_ifnames
# Verify management port in snmp interface list
for name in config_facts.get('MGMT_INTERFACE', {}):
- assert name in snmp_ifnames
+ assert name in snmp_ifnames, "Management Interface not found in SNMP facts."
diff --git a/tests/snmp/test_snmp_memory.py b/tests/snmp/test_snmp_memory.py
new file mode 100644
index 00000000000..50112b765eb
--- /dev/null
+++ b/tests/snmp/test_snmp_memory.py
@@ -0,0 +1,83 @@
+"""
+Test SNMP memory MIB in SONiC.
+Parameters:
+ --percentage: Set optional percentage of difference for test
+"""
+
+import pytest
+from tests.common.helpers.assertions import pytest_assert # pylint: disable=import-error
+pytestmark = [
+ pytest.mark.topology('any')
+]
+
+CALC_DIFF = lambda snmp, sys_data: float(abs(snmp - int(sys_data)) * 100) / float(snmp)
+
+@pytest.fixture(autouse=True, scope="module")
+def get_parameter(request):
+ """
+ Get optional parameter percentage or return default 4%
+ """
+ global percent
+ percent = request.config.getoption("--percentage") or 4
+ return percent
+
+@pytest.fixture()
+def load_memory(duthosts, rand_one_dut_hostname):
+ """
+ Execute script in background to load memory
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ duthost.copy(src='snmp/memory.py', dest='/tmp/memory.py')
+ duthost.shell("nohup python /tmp/memory.py > /dev/null 2>&1 &")
+ yield
+ duthost.shell("killall python /tmp/memory.py", module_ignore_errors=True)
+
+def collect_memory(duthosts, rand_one_dut_hostname):
+ """
+ Collect memory data from DUT
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ facts = {}
+ output = duthost.shell("cat /proc/meminfo")['stdout_lines']
+ for line in output:
+ split = line.split()
+ facts.update({split[0].replace(":", ""): split[-2]})
+ return facts
+
+def test_snmp_memory(duthosts, rand_one_dut_hostname, localhost, creds):
+ """
+ Verify if memory MIB equals to data collected from DUT
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c",
+ community=creds["snmp_rocommunity"])['ansible_facts']
+ facts = collect_memory(duthosts, rand_one_dut_hostname)
+ compare = (('ansible_sysTotalFreeMemery', 'MemFree'), ('ansible_sysTotalBuffMemory', 'Buffers'),
+ ('ansible_sysCachedMemory', 'Cached'))
+
+ # Verify correct behaviour of sysTotalMemery, sysTotalSharedMemory
+ pytest_assert(not abs(snmp_facts['ansible_sysTotalMemery'] - int(facts['MemTotal'])),
+ "Unexpected res sysTotalMemery {}".format(snmp_facts['ansible_sysTotalMemery']))
+ pytest_assert(not abs(snmp_facts['ansible_sysTotalSharedMemory'] - int(facts['Shmem'])),
+ "Unexpected res sysTotalSharedMemory {}".format(snmp_facts['ansible_sysTotalSharedMemory']))
+
+ # Verify correct behaviour of sysTotalFreeMemery, sysTotalBuffMemory, sysCachedMemory
+ snmp_diff = [snmp for snmp, sys_data in compare if CALC_DIFF(snmp_facts[snmp],
+ facts[sys_data]) > percent]
+ pytest_assert(not snmp_diff,
+ "Snmp memory MIBs: {} differs more than {} %".format(snmp_diff, percent))
+
+
+def test_snmp_memory_load(duthosts, rand_one_dut_hostname, localhost, creds, load_memory):
+ """
+ Verify SNMP total free memory matches DUT results in stress test
+ """
+ # Start memory stress generation
+ duthost = duthosts[rand_one_dut_hostname]
+ host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c",
+ community=creds["snmp_rocommunity"])['ansible_facts']
+ mem_free = duthost.shell("grep MemFree /proc/meminfo | awk '{print $2}'")['stdout']
+ pytest_assert(CALC_DIFF(snmp_facts['ansible_sysTotalFreeMemery'], mem_free) < percent,
+ "sysTotalFreeMemery differs by more than {}".format(percent))
diff --git a/tests/snmp/test_snmp_phy_entity.py b/tests/snmp/test_snmp_phy_entity.py
index 7a88d62bf28..ea39839f1e3 100644
--- a/tests/snmp/test_snmp_phy_entity.py
+++ b/tests/snmp/test_snmp_phy_entity.py
@@ -444,20 +444,20 @@ def test_turn_off_pdu_and_check_psu_info(duthost, localhost, creds, pdu_controll
pytest.skip('At least 2 outlets required for rest of the testing in this case')
# turn on all PSU
- for item in outlet_status:
- if not item['outlet_on']:
- pdu_controller.turn_on_outlet(item["outlet_id"])
+ for outlet in outlet_status:
+ if not outlet['outlet_on']:
+ pdu_controller.turn_on_outlet(outlet)
time.sleep(5)
outlet_status = pdu_controller.get_outlet_status()
- for item in outlet_status:
- if not item['outlet_on']:
+ for outlet in outlet_status:
+ if not outlet['outlet_on']:
pytest.skip('Not all outlet are powered on, skip rest of the testing in this case')
# turn off the first PSU
- first_outlet_id = outlet_status[0]['outlet_id']
- pdu_controller.turn_off_outlet(first_outlet_id)
- assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet_id, False)
+ first_outlet = outlet_status[0]
+ pdu_controller.turn_off_outlet(first_outlet)
+ assert wait_until(30, 5, check_outlet_status, pdu_controller, first_outlet, False)
# wait for psud update the database
assert wait_until(120, 20, _check_psu_status_after_power_off, duthost, localhost, creds)
@@ -581,13 +581,13 @@ def is_null_str(value):
return not value or value == str(None) or value == 'N/A'
-def check_outlet_status(pdu_controller, outlet_id, expect_status):
+def check_outlet_status(pdu_controller, outlet, expect_status):
"""
Check if a given PSU is at expect status
:param pdu_controller: PDU controller
- :param outlet_id: outlet id
+ :param outlet: PDU outlet
:param expect_status: Expect bool status, True means on, False means off
:return: True if a given PSU is at expect status
"""
- status = pdu_controller.get_outlet_status(outlet_id)
+ status = pdu_controller.get_outlet_status(outlet)
return 'outlet_on' in status[0] and status[0]['outlet_on'] == expect_status
diff --git a/tests/snmp/test_snmp_v2mib.py b/tests/snmp/test_snmp_v2mib.py
new file mode 100644
index 00000000000..cdc41ecd3dd
--- /dev/null
+++ b/tests/snmp/test_snmp_v2mib.py
@@ -0,0 +1,46 @@
+"""
+Test SNMPv2MIB in SONiC.
+"""
+
+import pytest
+from tests.common.helpers.assertions import pytest_assert # pylint: disable=import-error
+
+pytestmark = [
+ pytest.mark.topology('any')
+]
+
+
+def test_snmp_v2mib(duthosts, rand_one_dut_hostname, localhost, creds):
+ """
+ Verify SNMPv2-MIB objects are functioning properly
+ """
+ duthost = duthosts[rand_one_dut_hostname]
+ host_ip = duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']
+ snmp_facts = localhost.snmp_facts(host=host_ip, version="v2c",
+ community=creds["snmp_rocommunity"])['ansible_facts']
+ dut_facts = duthost.setup()['ansible_facts']
+ debian_ver = duthost.shell('cat /etc/debian_version')['stdout']
+ cmd = 'docker exec snmp grep "sysContact" /etc/snmp/snmpd.conf'
+ sys_contact = " ".join(duthost.shell(cmd)['stdout'].split()[1:])
+ sys_location = duthost.shell("grep 'snmp_location' /etc/sonic/snmp.yml")['stdout'].split()[-1]
+
+ expected_res = {'kernel_version': dut_facts['ansible_kernel'],
+ 'hwsku': duthost.facts['hwsku'],
+ 'os_version': 'SONiC.{}'.format(duthost.os_version),
+ 'debian_version': '{} {}'.format(dut_facts['ansible_distribution'], debian_ver)}
+
+ #Verify that sysName, sysLocation and sysContact MIB objects functions properly
+ pytest_assert(snmp_facts['ansible_sysname'] == duthost.hostname,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_sysname']))
+ pytest_assert(snmp_facts['ansible_syslocation'] == sys_location,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_syslocation']))
+ pytest_assert(snmp_facts['ansible_syscontact'] == sys_contact,
+ "Unexpected MIB result {}".format(snmp_facts['ansible_syscontact']))
+
+ #Verify that sysDescr MIB object functions properly
+ missed_values = []
+ for system_value in expected_res:
+ if expected_res[system_value] not in snmp_facts['ansible_sysdescr']:
+ missed_values.append(expected_res[system_value])
+ pytest_assert(not missed_values, "System values {} was not found in SNMP facts: {}"
+ .format(missed_values, snmp_facts['ansible_sysdescr']))
diff --git a/tests/sub_port_interfaces/Sub-ports-test-plan.md b/tests/sub_port_interfaces/Sub-ports-test-plan.md
index 81fd622801e..203dd4403d8 100644
--- a/tests/sub_port_interfaces/Sub-ports-test-plan.md
+++ b/tests/sub_port_interfaces/Sub-ports-test-plan.md
@@ -1,6 +1,6 @@
# Sub-port interfaces Test Plan
-## Rev 0.1
+## Rev 0.3
- [Revision](#revision)
- [Overview](#overview)
@@ -10,13 +10,17 @@
- [Test Cases](#Test-cases)
- [test_packet_routed_with_valid_vlan](#Test-case-test_packet_routed_with_valid_vlan)
- [test_packet_routed_with_invalid_vlan](#Test-case-test_packet_routed_with_invalid_vlan)
+ - [test_admin_status_down_disables_forwarding](#Test-case-test_admin_status_down_disables_forwarding)
+ - [test_max_numbers_of_sub_ports](#Test-case-test_max_numbers_of_sub_ports)
+ - [test_mtu_inherited_from_parent_port](#Test-case-test_mtu_inherited_from_parent_port)
+ - [test_vlan_config_impact](#Test-case-test_vlan_config_impact)
## Revision
-| Rev | Date | Author | Change Description |
-|:---:|:-----------:|:-----------------------|:-----------------------------------|
-| 0.1 | 30/11/2020 | BFN: Oleksandr Kozodoi | Initial version |
+| Rev | Date | Author | Change Description |
+|:---:|:-----------:|:-------------------------|:-----------------------------------|
+| 0.3 | 02/23/2021 | Intel: Oleksandr Kozodoi | Initial version |
## Overview
@@ -31,12 +35,12 @@ Purpose of the test is to verify a SONiC switch system correctly performs sub-po
## Testbed
-Supported topologies: t0
+Supported topologies: t0, t1
## Setup configuration
Each sub-ports test case needs traffic transmission.
-Traffic starts transmission, if DUT and PTF directly connected interfaces have the same VLAN IDs. So we need configure correct sub-ports on the DUT and PTF.
+Traffic starts transmission, if DUT and PTF directly connected interfaces have the same VLAN IDs. So we need configure correct sub-ports on the DUT and PTF.
For example the customized testbed with applied T0 topo for test_packet_routed test case looks as follows:
@@ -58,7 +62,7 @@ For example the customized testbed with applied T0 topo for test_packet_routed t
|_________________________________|
```
-Port mapping:
+#### Port mapping for port:
| DUT | | PTF | |
|:----------:|:-----------:|:-----------|:------------|
|**Sub-port**|**IP** |**Sub-port**|**IP** |
@@ -67,6 +71,15 @@ Port mapping:
|Ethernet8.10|172.16.4.1/30|eth2.10 |172.16.4.2/30|
|Ethernet8.20|172.16.4.5/30|eth2.20 |172.16.4.6/30|
+#### Port mapping for port in LAG:
+| DUT | | PTF | |
+|:----------:|:-----------:|:-----------|:------------|
+|**Sub-port**|**IP** |**Sub-port**|**IP** |
+|PortChannel1.10|172.16.0.1/30|bond1.10 |172.16.0.2/30|
+|PortChannel1.20|172.16.0.5/30|bond1.20 |172.16.0.6/30|
+|PortChannel2.10|172.16.4.1/30|bond2.10 |172.16.4.2/30|
+|PortChannel2.20|172.16.4.5/30|bond2.20 |172.16.4.6/30|
+
After end of the test session teardown procedure turns testbed to the initial state.
## Python scripts to setup and run test
@@ -75,6 +88,8 @@ Sub-ports test suite is located in tests/sub_port_interfaces folder. There is on
### Setup of DUT switch
+Parent ports of sub-ports are members of Vlan1000 in the t0 topology. So we need to remove parent ports from Vlan1000 before tests running.
+
During setup procedure python mgmt scripts perform DUT configuration via jinja template to convert it in to the JSON file containing configuration to be pushed to the SONiC config DB via sonic-cfggen. Setup procedure configures sub-port interfaces with fixture ```define_sub_ports_configuration```.
sub_port_config.j2
@@ -90,6 +105,7 @@ sub_port_config.j2
}
}
```
+Also, all test cases support LAG ports. So we need to configure additional PortChannel ports on the DUT and bond ports on the PTF before test running. We should use ```create_lag_port``` function and ```create_bond_port``` function for this. [Port mapping for port in LAG](#Port-mapping-for-port-in-LAG).
## Test cases
@@ -140,3 +156,108 @@ DUT and PTF directly connected interfaces have different VLAN IDs
- reload_dut_config function: reload DUT configuration
- reload_ptf_config function: remove all sub-ports configuration
+
+## Test case test_admin_status_down_disables_forwarding
+
+### Test objective
+
+Validates that admin status DOWN disables packet forwarding.
+
+### Test set up
+- apply_config_on_the_dut fixture(scope="function"): enable and configures sub-port interfaces on the DUT
+- apply_config_on_the_ptf fixture(scope="function"): enable and configures sub-port interfaces on the PTF
+
+### Test steps
+
+- Setup configuration of sub-ports on the DUT.
+- Setup configuration of sub-ports on the PTF.
+- Shutdown sub-ports on the DUT
+- Create ICMP packet.
+- Send ICMP request packet from PTF to DUT.
+- Verify that DUT doesn't send ICMP reply packet to PTF.
+- Create ICMP packet.
+- Send ICMP request packet from PTF to another sub-port of DUT.
+- Verify that DUT sends ICMP reply packet to PTF.
+- Startup sub-port on the DUT
+- Create ICMP packet.
+- Send ICMP request packet from PTF to DUT.
+- Verify that DUT sends ICMP reply packet to PTF.
+- Clear configuration of sub-ports on the DUT.
+- Clear configuration of sub-ports on the PTF.
+
+### Test teardown
+
+- reload_dut_config function: reload DUT configuration
+- reload_ptf_config function: remove all sub-ports configuration
+- teardown_test_class function: reload DUT configuration after running of test suite
+
+## Test case test_max_numbers_of_sub_ports
+
+### Test objective
+
+Validates that 256 sub-ports can be created per port or LAG.
+
+### Test set up
+- apply_config_on_the_dut fixture(scope="function"): enable and configures sub-port interfaces on the DUT
+- apply_config_on_the_ptf fixture(scope="function"): enable and configures sub-port interfaces on the PTF
+
+### Test steps
+
+- Setup configuration of 256 sub-ports on the DUT.
+- Setup configuration of 256 sub-ports on the PTF.
+- Create ICMP packet.
+- Send ICMP request packet from PTF to DUT.
+- Verify that DUT sends ICMP reply packet to PTF.
+- Clear configuration of sub-ports on the DUT.
+- Clear configuration of sub-ports on the PTF.
+
+### Test teardown
+
+- reload_dut_config function: reload DUT configuration
+- reload_ptf_config function: remove all sub-ports configuration
+
+## Test case test_mtu_inherited_from_parent_port
+
+### Test objective
+
+Validates that MTU settings of sub-ports inherited from parent port.
+
+### Test set up
+- apply_config_on_the_dut fixture(scope="function"): enable and configures sub-port interfaces on the DUT
+
+### Test steps
+
+- Setup correct configuration of sub-ports on the DUT.
+- Get MTU value of sub-port
+- Get MTU value of parent port
+- Clear configuration of sub-ports on the DUT.
+
+### Test teardown
+
+- reload_dut_config function: reload DUT configuration
+
+## Test case test_vlan_config_impact
+
+### Test objective
+
+Validates that removal of VLAN doesn't impact sub-port RIF with same VLAN ID.
+
+### Test set up
+- apply_config_on_the_dut fixture(scope="function"): enable and configures sub-port interfaces on the DUT
+- apply_config_on_the_ptf fixture(scope="function"): enable and configures sub-port interfaces on the PTF
+
+### Test steps
+
+- Setup correct configuration of sub-ports on the DUT.
+- Create a VLAN RIF with the same VLAN ID of sub-port.
+- Added PortChannel interface to VLAN members
+- Delete a VLAN RIF.
+- Make sure sub-port is available in redis-db.
+- Verify that DUT sends ICMP reply packet to PTF.
+- Clear configuration of sub-ports on the DUT.
+- Clear configuration of sub-ports on the PTF.
+
+### Test teardown
+
+- reload_dut_config function: reload DUT configuration
+- reload_ptf_config function: remove all sub-ports configuration
\ No newline at end of file
diff --git a/tests/sub_port_interfaces/conftest.py b/tests/sub_port_interfaces/conftest.py
index cc8c25fdf30..dc1aa18a8b1 100644
--- a/tests/sub_port_interfaces/conftest.py
+++ b/tests/sub_port_interfaces/conftest.py
@@ -1,17 +1,38 @@
import os
import ipaddress
+import time
import jinja2
import pytest
from tests.common import config_reload
+from tests.common.helpers.assertions import pytest_assert as py_assert
+from tests.common.utilities import wait_until
from sub_ports_helpers import DUT_TMP_DIR
from sub_ports_helpers import TEMPLATE_DIR
from sub_ports_helpers import SUB_PORTS_TEMPLATE
+from sub_ports_helpers import check_sub_port
+from sub_ports_helpers import remove_member_from_vlan
+from sub_ports_helpers import get_port
+from sub_ports_helpers import remove_sub_port
+from sub_ports_helpers import remove_lag_port
-@pytest.fixture
-def define_sub_ports_configuration(request, duthost, ptfhost):
+def pytest_addoption(parser):
+ """
+ Adds options to pytest that are used by the sub-ports tests.
+ """
+ parser.addoption(
+ "--max_numbers_of_sub_ports",
+ action="store",
+ type=int,
+ default=4,
+ help="Max numbers of sub-ports for test_max_numbers_of_sub_ports test case",
+ )
+
+
+@pytest.fixture(params=['port', 'port_in_lag'])
+def define_sub_ports_configuration(request, duthost, ptfhost, ptfadapter):
"""
Define configuration of sub-ports for TC run
@@ -36,26 +57,36 @@ def define_sub_ports_configuration(request, duthost, ptfhost):
}
}
"""
+ sub_ports_config = {}
+ max_numbers_of_sub_ports = request.config.getoption("--max_numbers_of_sub_ports")
vlan_ranges_dut = range(10, 30, 10)
vlan_ranges_ptf = range(10, 30, 10)
if 'invalid' in request.node.name:
vlan_ranges_ptf = range(11, 31, 10)
- interface_ranges = range(1, 2)
+ if 'max_numbers' in request.node.name:
+ vlan_ranges_dut = range(1, max_numbers_of_sub_ports + 1)
+ vlan_ranges_ptf = range(1, max_numbers_of_sub_ports + 1)
+
+ # Linux has the limitation of 15 characters on an interface name,
+ # but name of LAG port should have prefix 'PortChannel' and suffix
+ # '<0-9999>' on SONiC. So the max length of the LAG port suffix is 3 characters.
+ # For example: 'PortChannel1.99'
+ if request.param == 'port_in_lag':
+ max_numbers_of_sub_ports = max_numbers_of_sub_ports if max_numbers_of_sub_ports <= 99 else 99
+ vlan_ranges_dut = range(1, max_numbers_of_sub_ports + 1)
+ vlan_ranges_ptf = range(1, max_numbers_of_sub_ports + 1)
+
+ interface_ranges = range(1, 3)
ip_subnet = u'172.16.0.0/16'
prefix = 30
subnet = ipaddress.ip_network(ip_subnet)
- cfg_facts = duthost.config_facts(host=duthost.hostname, source="persistent")['ansible_facts']
- config_vlan_members = cfg_facts['VLAN_MEMBER']['Vlan1000']
- config_port_indices = {v: k for k, v in cfg_facts['port_index_map'].items() if k in config_vlan_members and v in interface_ranges}
- ptf_ports_available_in_topo = ptfhost.host.options['variable_manager'].extra_vars.get("ifaces_map")
- ptf_ports = [v for k, v in ptf_ports_available_in_topo.items() if k in interface_ranges]
+ config_port_indices, ptf_ports = get_port(duthost, ptfhost, interface_ranges, request.param)
subnets = [i for i, _ in zip(subnet.subnets(new_prefix=22), config_port_indices)]
- sub_ports_config = {}
for port, ptf_port, subnet in zip(config_port_indices.values(), ptf_ports, subnets):
for vlan_id_dut, vlan_id_ptf, net in zip(vlan_ranges_dut, vlan_ranges_ptf, subnet.subnets(new_prefix=30)):
hosts_list = [i for i in net.hosts()]
@@ -67,17 +98,20 @@ def define_sub_ports_configuration(request, duthost, ptfhost):
yield {
'sub_ports': sub_ports_config,
+ 'dut_ports': config_port_indices,
+ 'ptf_ports': ptf_ports
}
@pytest.fixture
-def apply_config_on_the_dut(define_sub_ports_configuration, duthost):
+def apply_config_on_the_dut(define_sub_ports_configuration, duthost, reload_dut_config):
"""
Apply Sub-ports configuration on the DUT and remove after tests
Args:
- setup_env: Dictonary of parameters for configuration DUT
+ define_sub_ports_configuration: Dictionary of parameters for configuring the DUT
duthost: DUT host object
+ reload_dut_config: fixture for teardown of DUT
Yields:
Dictonary of parameters for configuration DUT and PTF host
@@ -86,6 +120,11 @@ def apply_config_on_the_dut(define_sub_ports_configuration, duthost):
'sub_ports': define_sub_ports_configuration['sub_ports'],
}
+ parent_port_list = [sub_port.split('.')[0] for sub_port in define_sub_ports_configuration['sub_ports'].keys()]
+
+ for port in set(parent_port_list):
+ remove_member_from_vlan(duthost, '1000', port)
+
sub_ports_config_path = os.path.join(DUT_TMP_DIR, SUB_PORTS_TEMPLATE)
config_template = jinja2.Template(open(os.path.join(TEMPLATE_DIR, SUB_PORTS_TEMPLATE)).read())
@@ -93,19 +132,22 @@ def apply_config_on_the_dut(define_sub_ports_configuration, duthost):
duthost.copy(content=config_template.render(sub_ports_vars), dest=sub_ports_config_path)
duthost.command('sonic-cfggen -j {} --write-to-db'.format(sub_ports_config_path))
+ for sub_port in sub_ports_vars['sub_ports']:
+ py_assert(wait_until(3, 1, check_sub_port, duthost, sub_port),
+ "Sub-port {} was not created".format(sub_port))
+
yield sub_ports_vars
- reload_dut_config(duthost)
@pytest.fixture
-def apply_config_on_the_ptf(define_sub_ports_configuration, ptfhost):
+def apply_config_on_the_ptf(define_sub_ports_configuration, ptfhost, reload_ptf_config):
"""
Apply Sub-ports configuration on the PTF and remove after tests
Args:
- setup_env: Dictonary of parameters for configuration DUT
+ define_sub_ports_configuration: Dictionary of parameters for configuring the DUT
ptfhost: PTF host object
-
+ reload_ptf_config: fixture for teardown of PTF
"""
sub_ports = define_sub_ports_configuration['sub_ports']
@@ -115,28 +157,68 @@ def apply_config_on_the_ptf(define_sub_ports_configuration, ptfhost):
ptfhost.shell("ip address add {} dev {}".format(sub_port_info['neighbor_ip'], sub_port_info['neighbor_port']))
ptfhost.shell("ip link set {} up".format(sub_port_info['neighbor_port']))
- yield
- reload_ptf_config(ptfhost, sub_ports)
-
-def reload_dut_config(duthost):
+@pytest.fixture
+def reload_dut_config(request, duthost, define_sub_ports_configuration):
"""
DUT's configuration reload on teardown
Args:
+ request: pytest request object
duthost: DUT host object
-
+ define_sub_ports_configuration: Dictionary of parameters for configuring the DUT
"""
- config_reload(duthost)
+ yield
+ sub_ports = define_sub_ports_configuration['sub_ports']
+ dut_ports = define_sub_ports_configuration['dut_ports']
+ cfg_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
+
+ for sub_port, sub_port_info in sub_ports.items():
+ remove_sub_port(duthost, sub_port, sub_port_info['ip'])
+
+ if 'port_in_lag' in request.node.name:
+ for lag_port in dut_ports.values():
+ remove_lag_port(duthost, cfg_facts, lag_port)
+ duthost.shell('sudo config load -y /etc/sonic/config_db.json')
-def reload_ptf_config(ptfhost, sub_ports):
+
+@pytest.fixture
+def reload_ptf_config(request, ptfhost, define_sub_ports_configuration):
"""
PTF's configuration reload on teardown
Args:
+ request: pytest request object
ptfhost: PTF host object
+ define_sub_ports_configuration: Dictionary of parameters for configuring the DUT
"""
+ yield
+ sub_ports = define_sub_ports_configuration['sub_ports']
+
for sub_port_info in sub_ports.values():
ptfhost.shell("ip address del {} dev {}".format(sub_port_info['neighbor_ip'], sub_port_info['neighbor_port']))
ptfhost.shell("ip link del {}".format(sub_port_info['neighbor_port']))
+
+ if 'port_in_lag' in request.node.name:
+ ptf_ports = define_sub_ports_configuration['ptf_ports']
+ for bond_port, port_name in ptf_ports.items():
+ ptfhost.shell("ip link set {} nomaster".format(bond_port))
+ ptfhost.shell("ip link set {} nomaster".format(port_name))
+ ptfhost.shell("ip link set {} up".format(port_name))
+ ptfhost.shell("ip link del {}".format(bond_port))
+
+ ptfhost.shell("supervisorctl restart ptf_nn_agent")
+ time.sleep(5)
+
+
+@pytest.fixture(scope="module", autouse=True)
+def teardown_test_class(duthost):
+ """
+ Reload DUT configuration after running of test suite
+
+ Args:
+ duthost: DUT host object
+ """
+ yield
+ config_reload(duthost)
diff --git a/tests/sub_port_interfaces/sub_ports_helpers.py b/tests/sub_port_interfaces/sub_ports_helpers.py
index 09e416a23e1..6815f4ad38e 100644
--- a/tests/sub_port_interfaces/sub_ports_helpers.py
+++ b/tests/sub_port_interfaces/sub_ports_helpers.py
@@ -1,9 +1,16 @@
import os
+import time
+
+from collections import OrderedDict
import ptf.testutils as testutils
import ptf.mask as mask
import ptf.packet as packet
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.utilities import wait_until
+
+
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DUT_TMP_DIR = os.path.join('tmp', os.path.basename(BASE_DIR))
TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates')
@@ -34,7 +41,7 @@ def create_packet(eth_dst, eth_src, ip_dst, ip_src, vlan_vid, dl_vlan_enable=Fal
def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src, ip_dst, pkt_action):
"""
Send ICMP request packet from PTF to DUT and
- verify that DUT sends/doesn't sends ICMP reply packet to PTF.
+ verify that DUT sends/doesn't send ICMP reply packet to PTF.
Args:
duthost: DUT host object
@@ -44,7 +51,6 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src,
ip_src: Source IP address of PTF
ip_dst: Destination IP address of DUT
pkt_action: Packet action (forwarded or drop)
-
"""
router_mac = get_mac_dut(duthost, dst_port)
src_port_number = int(get_port_number(src_port))
@@ -78,7 +84,9 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src,
masked_exp_pkt.set_do_not_care_scapy(packet.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(packet.ICMP, "chksum")
+ ptfadapter.dataplane.flush()
testutils.send_packet(ptfadapter, src_port_number, pkt)
+
dst_port_list = [src_port_number]
if pkt_action == ACTION_FWD:
@@ -87,6 +95,153 @@ def generate_and_verify_traffic(duthost, ptfadapter, src_port, dst_port, ip_src,
testutils.verify_no_packet_any(ptfadapter, masked_exp_pkt, dst_port_list)
+def shutdown_port(duthost, interface):
+ """
+ Shutdown port on the DUT
+
+ Args:
+ duthost: DUT host object
+ interface: Interface of DUT
+ """
+ duthost.shutdown(interface)
+ pytest_assert(wait_until(3, 1, __check_interface_state, duthost, interface, 'down'),
+ "DUT's port {} didn't go down as expected".format(interface))
+
+
+def startup_port(duthost, interface):
+ """
+ Startup port on the DUT
+
+ Args:
+ duthost: DUT host object
+ interface: Interface of DUT
+ """
+ duthost.no_shutdown(interface)
+ pytest_assert(wait_until(3, 1, __check_interface_state, duthost, interface),
+ "DUT's port {} didn't go up as expected".format(interface))
+
+
+def __check_interface_state(duthost, interface, state='up'):
+ """
+ Check interface status
+
+ Args:
+ duthost: DUT host object
+ interface: Interface of DUT
+ state: state of DUT's interface
+
+ Returns:
+ Bool value which confirm port state
+ """
+ ports_down = duthost.interface_facts(up_ports=[interface])['ansible_facts']['ansible_interface_link_down_ports']
+
+ if 'down' in state:
+ return interface in ports_down
+ return interface not in ports_down
+
+
+def setup_vlan(duthost, vlan_id):
+ """
+ Setup VLAN's configuration on the DUT
+
+ Args:
+ duthost: DUT host object
+ vlan_id: VLAN id
+ """
+ duthost.shell('config vlan add %s' % vlan_id)
+
+ pytest_assert(wait_until(3, 1, __check_vlan, duthost, vlan_id),
+ "VLAN RIF Vlan{} didn't create as expected".format(vlan_id))
+
+
+def __check_vlan(duthost, vlan_id, removed=False):
+ """
+ Check availability of VLAN in redis-db
+
+ Args:
+ duthost: DUT host object
+ vlan_id: VLAN id
+ removed: Bool value which show availability of VLAN
+
+ Returns:
+ Bool value which confirm availability of VLAN in redis-db
+ """
+ vlan_name = 'Vlan{}'.format(vlan_id)
+ out = duthost.shell('redis-cli -n 4 keys "VLAN|{}"'.format(vlan_name))["stdout"]
+ if removed:
+ return vlan_name not in out
+ return vlan_name in out
+
+
+def __check_vlan_member(duthost, vlan_id, vlan_member, removed=False):
+ """
+ Check that VLAN member is available in redis-db
+
+ Args:
+ duthost: DUT host object
+ vlan_id: VLAN id
+ vlan_member: VLAN member
+ removed: Bool value which show availability of member in VLAN
+
+ Returns:
+ Bool value which confirm availability of VLAN member in redis-db
+ """
+ vlan_name = 'Vlan{}'.format(vlan_id)
+ out = duthost.shell('redis-cli -n 4 keys "VLAN_MEMBER|{}|{}"'.format(vlan_name, vlan_member))["stdout"]
+ if removed:
+ return vlan_name not in out
+ return vlan_name in out
+
+
+def remove_vlan(duthost, vlan_id):
+ """
+ Remove VLAN configuration on the DUT
+
+ Args:
+ duthost: DUT host object
+ vlan_id: VLAN id
+ """
+ duthost.shell('config vlan del {}'.format(vlan_id))
+
+ pytest_assert(wait_until(3, 1, __check_vlan, duthost, vlan_id, True),
+ "VLAN RIF Vlan{} didn't remove as expected".format(vlan_id))
+
+
+def remove_member_from_vlan(duthost, vlan_id, vlan_member):
+ """
+ Remove members of VLAN on DUT
+
+ Args:
+ duthost: DUT host object
+ vlan_id: VLAN id
+ vlan_member: VLAN member
+ """
+ if __check_vlan_member(duthost, vlan_id, vlan_member):
+ duthost.shell('config vlan member del {} {}'.format(vlan_id, vlan_member))
+ pytest_assert(wait_until(3, 1, __check_vlan_member, duthost, vlan_id, vlan_member, True),
+ "VLAN RIF Vlan{} have {} member".format(vlan_id, vlan_member))
+
+
+def check_sub_port(duthost, sub_port, removed=False):
+ """
+ Check that sub-port is available in redis-db
+
+ Args:
+ duthost: DUT host object
+ sub_port: Sub-port interface of DUT
+ removed: Bool value which show availability of sub-port on the DUT
+
+ Returns:
+ Bool value which confirm availability of sub-port in redis-db
+ """
+ config_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
+ sub_ports = set(config_facts.get('VLAN_SUB_INTERFACE', {}).keys())
+ if removed:
+ return sub_port not in sub_ports
+
+ return sub_port in sub_ports
+
+
def get_mac_dut(duthost, interface):
"""
Get MAC address of DUT interface
@@ -110,3 +265,145 @@ def get_port_number(interface):
Returns: Number of port
"""
return ''.join([i for i in interface.split('.')[0] if i.isdigit()])
+
+
+def get_port_mtu(duthost, interface):
+ """
+ Get MTU of port from interface name
+
+ Args:
+ duthost: DUT host object
+ interface: Full interface name
+
+ Returns: MTU
+ """
+ out = ''
+
+ if '.' in interface:
+ out = duthost.show_and_parse("show subinterface status {}".format(interface))
+ return out[0]['mtu']
+
+ out = duthost.show_and_parse("show interface status {}".format(interface))
+ return out[0]['mtu']
+
+
+def create_lag_port(duthost, config_port_indices):
+ """
+ Create lag ports on the DUT
+
+ Args:
+ duthost: DUT host object
+ config_port_indices: Dictionary of port on the DUT
+
+ Returns:
+ Dictionary of lag ports on the DUT
+ """
+ lag_port_map = {}
+ for port_index, port_name in config_port_indices.items():
+ lag_port = 'PortChannel{}'.format(port_index)
+ remove_ip_from_port(duthost, port_name)
+ remove_member_from_vlan(duthost, '1000', port_name)
+ duthost.shell('config portchannel add {}'.format(lag_port))
+ duthost.shell('config portchannel member add {} {}'.format(lag_port, port_name))
+ lag_port_map[port_index] = lag_port
+
+ return lag_port_map
+
+
+def create_bond_port(ptfhost, ptf_ports):
+ """
+ Create bond ports on the PTF
+
+ Args:
+ ptfhost: PTF host object
+ ptf_ports: List of ports on the PTF
+
+ Returns:
+ Dictionary of bond ports and slave ports on the PTF
+ """
+ bond_port_map = OrderedDict()
+ for port_index, port_name in ptf_ports.items():
+ bond_port = 'bond{}'.format(port_index)
+ ptfhost.shell("ip link add {} type bond".format(bond_port))
+ ptfhost.shell("ip link set {} type bond miimon 100 mode 802.3ad".format(bond_port))
+ ptfhost.shell("ip link set {} down".format(port_name))
+ ptfhost.shell("ip link set {} master {}".format(port_name, bond_port))
+ ptfhost.shell("ip link set dev {} up".format(bond_port))
+ ptfhost.shell("ifconfig {} mtu 9216 up".format(bond_port))
+
+ bond_port_map[bond_port] = port_name
+
+ ptfhost.shell("supervisorctl restart ptf_nn_agent")
+ time.sleep(5)
+
+ return bond_port_map
+
+
+def get_port(duthost, ptfhost, interface_ranges, port_type):
+ """
+ Get port configurations from DUT and PTF
+
+ Args:
+ duthost: DUT host object
+ ptfhost: PTF host object
+ interface_ranges: numbers of ports
+
+ Returns:
+ Tuple with port configurations of DUT and PTF
+ """
+ cfg_facts = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts']
+ config_vlan_members = cfg_facts['port_index_map']
+ config_port_indices = {v: k for k, v in cfg_facts['port_index_map'].items() if k in config_vlan_members and v in interface_ranges}
+ ptf_ports_available_in_topo = ptfhost.host.options['variable_manager'].extra_vars.get("ifaces_map")
+ ptf_ports = {port_id: ptf_ports_available_in_topo[port_id] for port_id in interface_ranges}
+
+ if port_type == 'port_in_lag':
+ lag_port_map = create_lag_port(duthost, config_port_indices)
+ bond_port_map = create_bond_port(ptfhost, ptf_ports)
+
+ return (lag_port_map, bond_port_map)
+
+ return (config_port_indices, ptf_ports.values())
+
+
+def remove_sub_port(duthost, sub_port, ip):
+ """
+ Remove sub-port from redis-db
+
+ Args:
+ duthost: DUT host object
+ sub_port: Sub-port name
+ interface: Interface of DUT
+ """
+ duthost.shell('config interface ip remove {} {}'.format(sub_port, ip))
+ duthost.shell('redis-cli -n 4 del "VLAN_SUB_INTERFACE|{}"'.format(sub_port))
+ pytest_assert(check_sub_port(duthost, sub_port, True), "Sub-port {} was not deleted".format(sub_port))
+
+
+def remove_lag_port(duthost, cfg_facts, lag_port):
+ """
+ Remove lag-port from DUT
+
+ Args:
+ duthost: DUT host object
+ cfg_facts: Ansible config_facts
+ lag_port: lag-port name
+ """
+ lag_members = cfg_facts['PORTCHANNEL_MEMBER'][lag_port].keys()
+ for port in lag_members:
+ duthost.shell('config portchannel member del {} {}'.format(lag_port, port))
+ duthost.shell('config portchannel del {}'.format(lag_port))
+
+
+def remove_ip_from_port(duthost, port):
+ """
+ Remove ip addresses from port
+
+ Args:
+ duthost: DUT host object
+ port: port name
+ """
+ ip_addresses = duthost.config_facts(host=duthost.hostname, source="running")['ansible_facts'].get('INTERFACE', {}).get(port, {})
+ if ip_addresses:
+ for ip in ip_addresses:
+ duthost.shell('config interface ip remove {} {}'.format(port, ip))
diff --git a/tests/sub_port_interfaces/test_sub_port_interfaces.py b/tests/sub_port_interfaces/test_sub_port_interfaces.py
index c198f9fd3a7..33143028feb 100644
--- a/tests/sub_port_interfaces/test_sub_port_interfaces.py
+++ b/tests/sub_port_interfaces/test_sub_port_interfaces.py
@@ -2,12 +2,20 @@
Tests sub-port interfaces in SONiC.
"""
+import random
import pytest
+from tests.common.helpers.assertions import pytest_assert
from sub_ports_helpers import generate_and_verify_traffic
+from sub_ports_helpers import get_port_mtu
+from sub_ports_helpers import shutdown_port
+from sub_ports_helpers import startup_port
+from sub_ports_helpers import setup_vlan
+from sub_ports_helpers import remove_vlan
+from sub_ports_helpers import check_sub_port
pytestmark = [
- pytest.mark.topology('t0')
+ pytest.mark.topology('t0', 't1')
]
class TestSubPorts(object):
@@ -15,7 +23,7 @@ class TestSubPorts(object):
TestSubPorts class for testing sub-port interfaces
"""
- def test_packet_routed_with_valid_vlan(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf):
+ def test_packet_routed_with_valid_vlan(self, duthost, ptfhost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf):
"""
Validates that packet routed if sub-ports have valid VLAN ID.
@@ -50,7 +58,7 @@ def test_packet_routed_with_invalid_vlan(self, duthost, ptfadapter, apply_config
2.) Setup different VLAN IDs on directly connected interfaces of sub-ports on the PTF.
3.) Create ICMP packet.
4.) Send ICMP request packet from PTF to DUT.
- 5.) Verify that DUT doesn't sends ICMP reply packet to PTF.
+ 5.) Verify that DUT doesn't send ICMP reply packet to PTF.
6.) Clear configuration of sub-ports on the DUT.
7.) Clear configuration of sub-ports on the DUT.
@@ -66,3 +74,156 @@ def test_packet_routed_with_invalid_vlan(self, duthost, ptfadapter, apply_config
dst_port=sub_port,
ip_dst=value['ip'],
pkt_action='drop')
+
+
+ def test_admin_status_down_disables_forwarding(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf):
+ """
+ Validates that admin status DOWN disables packet forwarding.
+
+ Test steps:
+ 1.) Setup configuration of sub-ports on the DUT.
+ 2.) Setup configuration of sub-ports on the PTF.
+ 3.) Shutdown sub-ports on the DUT
+ 4.) Create ICMP packet.
+ 5.) Send ICMP request packet from PTF to DUT.
+ 6.) Verify that DUT doesn't send ICMP reply packet to PTF.
+ 7.) Create ICMP packet.
+ 8.) Send ICMP request packet from PTF to another sub-port of DUT.
+ 9.) Verify that DUT sends ICMP reply packet to PTF.
+ 10.) Startup sub-port on the DUT
+ 11.) Create ICMP packet.
+ 12.) Send ICMP request packet from PTF to DUT.
+ 13.) Verify that DUT sends ICMP reply packet to PTF.
+ 14.) Clear configuration of sub-ports on the DUT.
+ 15.) Clear configuration of sub-ports on the PTF.
+
+ Pass Criteria: PTF doesn't get ICMP reply packet from disabled sub-ports of DUT.
+ """
+ sub_ports = apply_config_on_the_dut['sub_ports']
+
+ for sub_port, value in sub_ports.items():
+ shutdown_port(duthost, sub_port)
+ generate_and_verify_traffic(duthost=duthost,
+ ptfadapter=ptfadapter,
+ src_port=value['neighbor_port'],
+ ip_src=value['neighbor_ip'],
+ dst_port=sub_port,
+ ip_dst=value['ip'],
+ pkt_action='drop')
+
+ for next_sub_port, next_value in sub_ports.items():
+ if next_sub_port != sub_port:
+ generate_and_verify_traffic(duthost=duthost,
+ ptfadapter=ptfadapter,
+ src_port=next_value['neighbor_port'],
+ ip_src=next_value['neighbor_ip'],
+ dst_port=next_sub_port,
+ ip_dst=next_value['ip'],
+ pkt_action='fwd')
+
+ startup_port(duthost, sub_port)
+ generate_and_verify_traffic(duthost=duthost,
+ ptfadapter=ptfadapter,
+ src_port=value['neighbor_port'],
+ ip_src=value['neighbor_ip'],
+ dst_port=sub_port,
+ ip_dst=value['ip'],
+ pkt_action='fwd')
+
+
+ def test_max_numbers_of_sub_ports(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf):
+ """
+ Validates that 256 sub-ports can be created per port or LAG
+
+ Test steps:
+ 1.) Setup configuration of 256 sub-ports on the DUT.
+ 2.) Setup configuration of 256 sub-ports on the PTF.
+ 3.) Create ICMP packet.
+ 4.) Send ICMP request packet from PTF to DUT.
+ 5.) Verify that DUT sends ICMP reply packet to PTF.
+ 6.) Clear configuration of sub-ports on the DUT.
+ 7.) Clear configuration of sub-ports on the PTF.
+
+ Pass Criteria: PTF gets ICMP reply packet from DUT.
+
+ Note:
+ The running of the test case takes about 80 minutes.
+ """
+ sub_ports_new = dict()
+ sub_ports = apply_config_on_the_dut['sub_ports']
+ sub_ports_new[sub_ports.keys()[0]] = sub_ports[sub_ports.keys()[0]]
+ sub_ports_new[sub_ports.keys()[-1]] = sub_ports[sub_ports.keys()[-1]]
+
+ rand_sub_ports = sub_ports.keys()[random.randint(1, len(sub_ports)-1)]
+ sub_ports_new[rand_sub_ports] = sub_ports[rand_sub_ports]
+
+ for sub_port, value in sub_ports_new.items():
+ generate_and_verify_traffic(duthost=duthost,
+ ptfadapter=ptfadapter,
+ src_port=value['neighbor_port'],
+ ip_src=value['neighbor_ip'],
+ dst_port=sub_port,
+ ip_dst=value['ip'],
+ pkt_action='fwd')
+
+
+ def test_mtu_inherited_from_parent_port(self, duthost, apply_config_on_the_dut, apply_config_on_the_ptf):
+ """
+ Validates that MTU settings of sub-ports inherited from parent port
+
+ Test steps:
+        1.) Setup correct configuration of sub-ports on the DUT.
+        2.) Get MTU value of sub-port.
+        3.) Get MTU value of parent port.
+        4.) Clear configuration of sub-ports on the DUT.
+
+ Pass Criteria: MTU settings of sub-ports inherited from parent port.
+ """
+ sub_ports = apply_config_on_the_dut['sub_ports']
+
+ for sub_port in sub_ports.keys():
+ sub_port_mtu = int(get_port_mtu(duthost, sub_port))
+ # Get name of parent port from name of sub-port
+ port = sub_port.split('.')[0]
+ port_mtu = int(get_port_mtu(duthost, port))
+
+ pytest_assert(sub_port_mtu == port_mtu, "MTU of {} doesn't inherit MTU of {}".format(sub_port, port))
+
+
+ def test_vlan_config_impact(self, duthost, ptfadapter, apply_config_on_the_dut, apply_config_on_the_ptf):
+ """
+ Validates that removal of VLAN doesn't impact sub-port RIF with same VLAN ID.
+
+ Test steps:
+        1.) Setup correct configuration of sub-ports on the DUT.
+        2.) Create a VLAN RIF with the same VLAN ID as the sub-port.
+        3.) Add a PortChannel interface to the VLAN members.
+        4.) Delete the VLAN RIF.
+        5.) Make sure the sub-port is still present in redis-db.
+        6.) Verify that DUT sends ICMP reply packet to PTF.
+        7.) Clear configuration of sub-ports on the DUT.
+        8.) Clear configuration of sub-ports on the PTF.
+
+ Pass Criteria:
+ 1.) Sub-port is available in redis-db.
+ 2.) PTF gets ICMP reply packet from DUT.
+ """
+ sub_ports = apply_config_on_the_dut['sub_ports']
+
+ for sub_port, value in sub_ports.items():
+ # Get VLAN ID from name of sub-port
+ vlan_vid = int(sub_port.split('.')[1])
+ # Create a VLAN RIF
+ setup_vlan(duthost, vlan_vid)
+ # Delete a VLAN RIF
+ remove_vlan(duthost, vlan_vid)
+
+ pytest_assert(check_sub_port(duthost, sub_port), "Sub-port {} was deleted".format(sub_port))
+
+ generate_and_verify_traffic(duthost=duthost,
+ ptfadapter=ptfadapter,
+ src_port=value['neighbor_port'],
+ ip_src=value['neighbor_ip'],
+ dst_port=sub_port,
+ ip_dst=value['ip'],
+ pkt_action='fwd')
diff --git a/tests/system_health/test_system_health.py b/tests/system_health/test_system_health.py
index 588a17e05e8..df900d3788f 100644
--- a/tests/system_health/test_system_health.py
+++ b/tests/system_health/test_system_health.py
@@ -6,6 +6,7 @@
from pkg_resources import parse_version
from tests.common.utilities import wait_until
from tests.common.helpers.assertions import pytest_require
+from tests.platform_tests.thermal_control_test_helper import disable_thermal_policy
from device_mocker import device_mocker_factory
pytestmark = [
@@ -97,7 +98,8 @@ def test_service_checker(duthosts, rand_one_dut_hostname):
assert summary == expect_summary, 'Expect summary {}, got {}'.format(expect_summary, summary)
-def test_device_checker(duthosts, rand_one_dut_hostname, device_mocker_factory):
+@pytest.mark.disable_loganalyzer
+def test_device_checker(duthosts, rand_one_dut_hostname, device_mocker_factory, disable_thermal_policy):
duthost = duthosts[rand_one_dut_hostname]
device_mocker = device_mocker_factory(duthost)
wait_system_health_boot_up(duthost)
diff --git a/tests/tacacs/test_ro_user.py b/tests/tacacs/test_ro_user.py
index 3270ad1a71c..abffbbdd4ed 100644
--- a/tests/tacacs/test_ro_user.py
+++ b/tests/tacacs/test_ro_user.py
@@ -80,10 +80,20 @@ def test_ro_user_allowed_command(localhost, duthosts, rand_one_dut_hostname, cre
# 'sudo psuutil *',
# 'sudo sfputil show *',
'sudo ip netns identify 1',
+ 'sudo ipintutil',
+ 'sudo ipintutil -a ipv6',
+ 'sudo ipintutil -n asic0 -d all',
+ 'sudo ipintutil -n asic0 -d all -a ipv6'
]
# Run as readonly use the commands allowed indirectly based on sudoers file
commands_indirect = [
'show version',
+ 'show interface status',
+ 'show interface portchannel',
+ 'show ip bgp summary',
+ 'show ip interface',
+ 'show ipv6 interface',
+ 'show lldp table'
]
for command in commands_direct + commands_indirect:
@@ -105,6 +115,8 @@ def test_ro_user_banned_command(localhost, duthosts, rand_one_dut_hostname, cred
# Run as readonly use the commands allowed by sudoers file
commands = [
'sudo shutdown',
+ # all commands under the config tree
+ 'sudo config'
]
for command in commands:
diff --git a/tests/templates/garp_service.conf.j2 b/tests/templates/garp_service.conf.j2
new file mode 100644
index 00000000000..c15d7967ccb
--- /dev/null
+++ b/tests/templates/garp_service.conf.j2
@@ -0,0 +1,10 @@
+[program:garp_service]
+command=/usr/bin/python /opt/garp_service.py {{ garp_service_args }}
+process_name=garp_service
+stdout_logfile=/tmp/garp_service.out.log
+stderr_logfile=/tmp/garp_service.err.log
+redirect_stderr=false
+autostart=false
+autorestart=false
+startsecs=1
+numprocs=1
diff --git a/tests/templates/y_cable_simulator_client.j2 b/tests/templates/y_cable_simulator_client.j2
new file mode 100644
index 00000000000..909b6e6836c
--- /dev/null
+++ b/tests/templates/y_cable_simulator_client.j2
@@ -0,0 +1,271 @@
+from urllib import request, error
+import json
+import os
+import re
+from sonic_py_common import logger, device_info
+from portconfig import get_port_config
+from natsort import natsorted
+from sonic_py_common.interface import backplane_prefix
+
+DUTS_MAP = {{ duts_map }}
+
+VM_SET = "{{ group_name }}"
+
+DUT_NAME = "{{ dut_name }}"
+
+BASE_URL = "http://{{ mux_simulator_server }}:{{ mux_simulator_port }}/"
+
+SYSLOG_IDENTIFIER = "y_cable_sim"
+helper_logger = logger.Logger(SYSLOG_IDENTIFIER)
+
+UPPER_TOR = "upper_tor"
+LOWER_TOR = "lower_tor"
+
+PLATFORM_JSON = 'platform.json'
+PORT_CONFIG_INI = 'port_config.ini'
+
+# A dict for mapping physical port to host interface
+g_physical_to_host_port_mapping = {}
+
+def _physical_port_to_host_port(physical_port):
+ """
+ Convert physical port to host interface index.
+    The physical port index obtained from sfputil differs from the host interface index when the interface is split.
+ However, the mux bridges on vm_host are named according to host interface index (vm_topology.py).
+ So a convert is needed.
+
+ @arg physical_port: The physical port index got from sfputil
+ @returns: The host interface index (0-based)
+ """
+ global g_physical_to_host_port_mapping
+
+ if not g_physical_to_host_port_mapping:
+ _load_port_info()
+ # Return physical_port - 1 in case loading port config file failure
+ return g_physical_to_host_port_mapping.get(physical_port, physical_port - 1)
+
+def _load_port_info():
+ """
+ Parse platform.json or port_config.ini to get the mapping between physical_port and host interface index
+ and physical_port and logical port name. The mapping is saved in two global variables.
+ """
+ porttabfile = device_info.get_path_to_port_config_file()
+ parse_fmt_platform_json = (os.path.basename(porttabfile) == PLATFORM_JSON)
+
+ if parse_fmt_platform_json:
+ _load_port_config_json()
+ else:
+ _load_port_config_ini(porttabfile)
+
+def _load_port_config_json():
+ """
+ A helper function for loading port config from 'platform.json'
+ """
+ global g_physical_to_host_port_mapping
+
+ (platform, hwsku) = device_info.get_platform_and_hwsku()
+ ports, _, _ = get_port_config(hwsku, platform)
+ if not ports:
+ helper_logger.log_warning('Failed to get port config')
+ return
+ else:
+ logical_list = []
+ for intf in ports.keys():
+ logical_list.append(intf)
+
+ logical = natsorted(logical_list, key=lambda y: y.lower())
+ host_intf_index = 0
+ for intf_name in logical:
+ fp_port_index = -1
+ if 'index' in ports[intf_name].keys():
+ fp_port_index = int(ports[intf_name]['index'])
+ if fp_port_index not in g_physical_to_host_port_mapping:
+ g_physical_to_host_port_mapping[fp_port_index] = host_intf_index;
+ host_intf_index += 1
+
+def _load_port_config_ini(porttabfile):
+ """
+ A helper function for loading port config from 'port_config.ini'
+ """
+ global g_physical_to_host_port_mapping
+
+ parse_fmt_port_config_ini = (os.path.basename(porttabfile) == PORT_CONFIG_INI)
+ host_intf_index = 0
+ with open(porttabfile, "r") as f:
+ # Read the porttab file and generate dicts
+ # with mapping for future reference.
+ title = []
+ for line in f:
+ line.strip()
+ if re.search("^#", line) is not None:
+ # The current format is: # name lanes alias index speed
+ # Where the ordering of the columns can vary
+ title = line.lstrip('#').strip().split()
+ continue
+ # Parsing logic for 'port_config.ini' file
+ if (parse_fmt_port_config_ini):
+ # bcm_port is not explicitly listed in port_config.ini format
+ # Currently we assume ports are listed in numerical order according to bcm_port
+ # so we use the port's position in the file (zero-based) as bcm_port
+ portname = line.split()[0]
+
+ # Ignore if this is an internal backplane interface
+ if portname.startswith(backplane_prefix()):
+ continue
+
+ if "index" in title:
+ fp_port_index = int(line.split()[title.index("index")])
+ # Leave the old code for backward compatibility
+ elif "asic_port_name" not in title and len(line.split()) >= 4:
+ fp_port_index = int(line.split()[3])
+ else:
+ fp_port_index = portname.split("Ethernet").pop()
+ fp_port_index = int(fp_port_index.split("s").pop(0))/4
+ else:
+ # Parsing logic for older 'portmap.ini' file
+ (portname, bcm_port) = line.split("=")[1].split(",")[:2]
+
+ fp_port_index = portname.split("Ethernet").pop()
+ fp_port_index = int(fp_port_index.split("s").pop(0))/4
+ if fp_port_index not in g_physical_to_host_port_mapping:
+ g_physical_to_host_port_mapping[fp_port_index] = host_intf_index
+
+ # Next line, next host index
+ host_intf_index += 1
+
+def _url(physical_port):
+ """
+ Helper function to build an url for given physical_port
+
+ Args:
+ physical_port: physical port on switch, an integer starting from 1
+ Returns:
+ str: The url for post/get.
+ """
+ host_intf_index = _physical_port_to_host_port(physical_port)
+ return BASE_URL + "/mux/{}/{}".format(VM_SET, host_intf_index)
+
+def _post(physical_port, data):
+ """
+ Helper function for posting data to y_cable server.
+
+ Args:
+ physical_port: physical port on switch, an integer starting from 1
+ data: data to post
+ Returns:
+ True if succeed. False otherwise
+ """
+ data = json.dumps(data).encode(encoding='utf-8')
+ header = {'Accept': 'application/json', 'Content-Type': 'application/json'}
+ req = request.Request(url=_url(physical_port), data=data, headers=header)
+ try:
+ _ = request.urlopen(req)
+ except error.HTTPError as e:
+ try:
+ err_msg = json.loads(e.read().decode())['err_msg']
+ helper_logger.log_warning("post request returns err. status_code = {} err_msg = {}".format(e.code, err_msg))
+ except Exception:
+ helper_logger.log_warning("post request returns err. status_code = {}".format(e.code))
+ return False
+ except error.URLError as e:
+ helper_logger.log_warning("post request returns err. err_msg = {}".format(str(e)))
+ return False
+ return True
+
+def _get(physical_port):
+ """
+ Helper function for polling status from y_cable server.
+
+ Args:
+ physical_port: physical port on switch, an integer starting from 1
+ Returns:
+ dict: A dict decoded from server's response.
+        None: Returns None if an error is detected.
+ """
+ req = request.Request(url=_url(physical_port))
+ try:
+ res = request.urlopen(req)
+ data = res.read()
+ return json.loads(data)
+ except error.HTTPError as e:
+ err_msg = json.loads(e.read().decode())['err_msg']
+ helper_logger.log_warning("get request returns err. status_code = {} err_msg = {}".format(e.code, err_msg))
+ except error.URLError as e:
+ helper_logger.log_warning("get request returns err. err_msg = {}".format(str(e)))
+ except json.decoder.JSONDecodeError as e:
+ helper_logger.log_warning("failed to parse response as json. err_msg = {}".format(str(e)))
+ except Exception as e:
+ helper_logger.log_warning("get request returns err. err_msg = {}".format(str(e)))
+ return None
+
+def _toggle_to(physical_port, target):
+ """
+ Helper function for toggling to certain TOR.
+
+ Args:
+ physical_port: physical port on switch, an integer starting from 1
+ target: UPPER_TOR / LOWER_TOR
+ Returns:
+ True if succeed. False otherwise
+ """
+ data = {"active_side": target}
+ helper_logger.log_info("physical_port {} toggle to {}".format(physical_port, target))
+ return _post(physical_port, data)
+
+def _get_side(physical_port):
+ """
+ Retrieve the current active tor from y_cable simulator server.
+ Args:
+ physical_port: physical port on switch, an integer starting from 1
+ Returns:
+ 1 if UPPER_TOR is active
+ 2 if LOWER_TOR is active
+        -1 for exception or inconsistent status
+ """
+ res = _get(physical_port)
+ if not res:
+ return -1
+ active_side = res["active_side"]
+ if active_side == UPPER_TOR:
+ return 1
+ elif active_side == LOWER_TOR:
+ return 2
+ else:
+ return -1
+
+def toggle_mux_to_torA(physical_port):
+ return _toggle_to(physical_port, UPPER_TOR)
+
+def toggle_mux_to_torB(physical_port):
+ return _toggle_to(physical_port, LOWER_TOR)
+
+def check_read_side(physical_port):
+ return DUTS_MAP[DUT_NAME] + 1
+
+def check_mux_direction(physical_port):
+ return _get_side(physical_port)
+
+def check_active_linked_tor_side(physical_port):
+ return _get_side(physical_port)
+
+def check_if_link_is_active_for_NIC(physical_port):
+ """
+ Checks if NIC side of the Y cable's link is active.
+ Always return True for now because all links in simulator are active.
+ """
+ return True
+
+def check_if_link_is_active_for_torA(physical_port):
+ """
+ Checks if UPPER_TOR side of the Y cable's link is active.
+ Always return True for now because all links in simulator are active.
+ """
+ return True
+
+def check_if_link_is_active_for_torB(physical_port):
+ """
+ Checks if LOWER_TOR side of the Y cable's link is active.
+ Always return True for now because all links in simulator are active.
+ """
+ return True
+
diff --git a/tests/test_pretest.py b/tests/test_pretest.py
index e10c8ea203c..4d3dbf60437 100644
--- a/tests/test_pretest.py
+++ b/tests/test_pretest.py
@@ -4,6 +4,7 @@
import time
import os
+from jinja2 import Template
from common.helpers.assertions import pytest_require
logger = logging.getLogger(__name__)
@@ -14,6 +15,13 @@
pytest.mark.disable_loganalyzer
]
+
+def test_cleanup_cache():
+ folder = '_cache'
+ if os.path.exists(folder):
+ os.system('rm -rf {}'.format(folder))
+
+
def test_cleanup_testbed(duthosts, enum_dut_hostname, request, ptfhost):
duthost = duthosts[enum_dut_hostname]
deep_clean = request.config.getoption("--deep_clean")
@@ -165,6 +173,50 @@ def test_update_saithrift_ptf(request, ptfhost):
ptfhost.shell("dpkg -i {}".format(os.path.join("/root", pkg_name)))
logging.info("Python saithrift package installed successfully")
+def test_inject_y_cable_simulator_client(duthosts, enum_dut_hostname, tbinfo):
+ '''
+ Inject the Y cable simulator client to both ToRs in a dualtor testbed
+ '''
+ if 'dualtor' not in tbinfo['topo']['name']:
+ return
+
+ logger.info("Injecting Y cable simulator client to {}".format(enum_dut_hostname))
+ dut = duthosts[enum_dut_hostname]
+ mux_simulator_port = 8080
+ y_cable_sim_client_template_path = 'templates/y_cable_simulator_client.j2'
+
+ server_num = tbinfo['server'].split('_')[-1]
+ mux_simulator_server = dut.host.options['inventory_manager'] \
+ .get_hosts(pattern='vm_host_{}'.format(server_num))[0] \
+ .get_vars()['ansible_host']
+
+ template_args = {
+ 'duts_map': json.dumps(tbinfo['duts_map'], sort_keys=True, indent=4),
+ 'mux_simulator_server': mux_simulator_server,
+ 'mux_simulator_port': mux_simulator_port,
+ 'dut_name': enum_dut_hostname,
+ 'group_name': tbinfo['group-name']
+ }
+
+ with open(y_cable_sim_client_template_path) as f:
+ template = Template(f.read())
+
+ rendered = template.render(template_args)
+
+ dut.copy(content=rendered, dest='/tmp/y_cable_simulator_client.py')
+ dut.shell('docker cp /tmp/y_cable_simulator_client.py pmon:/usr/lib/python3/dist-packages/')
+ dut.shell('systemctl restart pmon')
+
+def test_stop_pfcwd(duthosts, enum_dut_hostname, tbinfo):
+ '''
+ Stop pfcwd on dual tor testbeds
+ '''
+ if 'dualtor' not in tbinfo['topo']['name']:
+ pytest.skip("Skip this test on non dualTOR testbeds")
+
+ dut = duthosts[enum_dut_hostname]
+ dut.command('pfcwd stop')
+
"""
Separator for internal pretests.
Please add public pretest above this comment and keep internal
diff --git a/tests/upgrade_path/conftest.py b/tests/upgrade_path/conftest.py
index 7bbc01e6b3c..10a495e9d11 100644
--- a/tests/upgrade_path/conftest.py
+++ b/tests/upgrade_path/conftest.py
@@ -5,6 +5,12 @@
def pytest_addoption(parser):
options_group = parser.getgroup("Upgrade_path test suite options")
+ options_group.addoption(
+ "--upgrade_type",
+ default="warm",
+ help="Specify the type (warm/fast/cold) of upgrade that is needed from source to target image",
+ )
+
options_group.addoption(
"--base_image_list",
default="",
@@ -33,7 +39,8 @@ def pytest_runtest_setup(item):
@pytest.fixture(scope="module")
def upgrade_path_lists(request):
+ upgrade_type = request.config.getoption('upgrade_type')
from_list = request.config.getoption('base_image_list')
to_list = request.config.getoption('target_image_list')
restore_to_image = request.config.getoption('restore_to_image')
- return from_list, to_list, restore_to_image
+ return upgrade_type, from_list, to_list, restore_to_image
diff --git a/tests/upgrade_path/test_upgrade_path.py b/tests/upgrade_path/test_upgrade_path.py
index 9b150f19b9c..028f6a46bd3 100644
--- a/tests/upgrade_path/test_upgrade_path.py
+++ b/tests/upgrade_path/test_upgrade_path.py
@@ -12,8 +12,8 @@
from tests.common.helpers.assertions import pytest_assert
from tests.common.platform.ssh_utils import prepare_testbed_ssh_keys
from tests.common import reboot
-from tests.common.reboot import get_reboot_cause
-from tests.common.reboot import REBOOT_TYPE_WARM
+from tests.common.reboot import get_reboot_cause, reboot_ctrl_dict
+from tests.common.reboot import REBOOT_TYPE_WARM, REBOOT_TYPE_COLD
from tests.common.fixtures.ptfhost_utils import copy_ptftests_directory # lgtm[py/unused-import]
@@ -44,7 +44,7 @@ def setup(localhost, ptfhost, duthosts, rand_one_dut_hostname, upgrade_path_list
def cleanup(localhost, ptfhost, duthost, upgrade_path_lists, tbinfo):
- _, _, restore_to_image = upgrade_path_lists
+ _, _, _, restore_to_image = upgrade_path_lists
if restore_to_image:
logger.info("Preparing to cleanup and restore to {}".format(restore_to_image))
# restore orignial image
@@ -89,7 +89,7 @@ def prepare_ptf(ptfhost, duthost, tbinfo):
@pytest.fixture(scope="module")
-def ptf_params(duthosts, rand_one_dut_hostname, nbrhosts, creds, tbinfo):
+def ptf_params(duthosts, rand_one_dut_hostname, creds, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
if duthost.facts['platform'] == 'x86_64-kvm_x86_64-r0':
@@ -105,12 +105,10 @@ def ptf_params(duthosts, rand_one_dut_hostname, nbrhosts, creds, tbinfo):
lo_v6_prefix = str(ipaddr.IPNetwork(intf['addr'] + '/64').network) + '/64'
break
- vm_hosts = []
- nbrs = nbrhosts
- for key, value in nbrs.items():
- #TODO:Update to vm_hosts.append(value['host'].host.mgmt_ip)
- vm_hosts.append(value['host'].host.options['inventory_manager'].get_host(value['host'].hostname).vars['ansible_host'])
-
+ mgFacts = duthost.get_extended_minigraph_facts(tbinfo)
+ vm_hosts = [
+ attr['mgmt_addr'] for dev, attr in mgFacts['minigraph_devices'].items() if attr['hwsku'] == 'Arista-VM'
+ ]
sonicadmin_alt_password = duthost.host.options['variable_manager']._hostvars[duthost.hostname].get("ansible_altpassword")
ptf_params = {
"verbose": False,
@@ -135,16 +133,15 @@ def ptf_params(duthosts, rand_one_dut_hostname, nbrhosts, creds, tbinfo):
return ptf_params
-def get_reboot_type(duthost):
- next_os_version = duthost.shell('sonic_installer list | grep Next | cut -f2 -d " "')['stdout']
- current_os_version = duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
-
- # warm-reboot has to be forced for an upgrade from 201811 to 201811+ to bypass ASIC config changed error
- if 'SONiC-OS-201811' in current_os_version and 'SONiC-OS-201811' not in next_os_version:
- reboot_type = "warm-reboot -f"
- else:
- reboot_type = "warm-reboot"
- return reboot_type
+def get_reboot_command(duthost, upgrade_type):
+ reboot_command = reboot_ctrl_dict.get(upgrade_type).get("command")
+ if upgrade_type == REBOOT_TYPE_WARM:
+ next_os_version = duthost.shell('sonic_installer list | grep Next | cut -f2 -d " "')['stdout']
+ current_os_version = duthost.shell('sonic_installer list | grep Current | cut -f2 -d " "')['stdout']
+ # warm-reboot has to be forced for an upgrade from 201811 to 201811+ to bypass ASIC config changed error
+ if 'SONiC-OS-201811' in current_os_version and 'SONiC-OS-201811' not in next_os_version:
+ reboot_command = "warm-reboot -f"
+ return reboot_command
def check_sonic_version(duthost, target_version):
@@ -182,6 +179,7 @@ def install_sonic(duthost, image_url, tbinfo):
duthost.shell("mkdir -p /tmp/tmpfs", module_ignore_errors=True)
duthost.shell("umount /tmp/tmpfs", module_ignore_errors=True)
duthost.shell("mount -t tmpfs -o size=1300M tmpfs /tmp/tmpfs", module_ignore_errors=True)
+ logger.info("Image exists locally. Copying the image {} into the device path {}".format(image_url, save_as))
duthost.copy(src=image_url, dest=save_as)
res = duthost.reduce_and_add_sonic_images(save_as=save_as)
@@ -213,7 +211,7 @@ def check_services(duthost):
@pytest.mark.device_type('vs')
def test_upgrade_path(localhost, duthosts, rand_one_dut_hostname, ptfhost, upgrade_path_lists, ptf_params, setup, tbinfo):
duthost = duthosts[rand_one_dut_hostname]
- from_list_images, to_list_images, _ = upgrade_path_lists
+ upgrade_type, from_list_images, to_list_images, _ = upgrade_path_lists
from_list = from_list_images.split(',')
to_list = to_list_images.split(',')
assert (from_list and to_list)
@@ -233,20 +231,23 @@ def test_upgrade_path(localhost, duthosts, rand_one_dut_hostname, ptfhost, upgra
target_version = install_sonic(duthost, to_image, tbinfo)
test_params = ptf_params
test_params['target_version'] = target_version
- test_params['reboot_type'] = get_reboot_type(duthost)
+ test_params['reboot_type'] = get_reboot_command(duthost, upgrade_type)
prepare_testbed_ssh_keys(duthost, ptfhost, test_params['dut_username'])
log_file = "/tmp/advanced-reboot.ReloadTest.{}.log".format(datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
-
- ptf_runner(ptfhost,
- "ptftests",
- "advanced-reboot.ReloadTest",
- platform_dir="ptftests",
- params=test_params,
- platform="remote",
- qlen=10000,
- log_file=log_file)
+ if test_params['reboot_type'] == reboot_ctrl_dict.get(REBOOT_TYPE_COLD).get("command"):
+ # advance-reboot test (on ptf) does not support cold reboot yet
+ reboot(duthost, localhost)
+ else:
+ ptf_runner(ptfhost,
+ "ptftests",
+ "advanced-reboot.ReloadTest",
+ platform_dir="ptftests",
+ params=test_params,
+ platform="remote",
+ qlen=10000,
+ log_file=log_file)
reboot_cause = get_reboot_cause(duthost)
- logger.info("Check reboot cause. Expected cause {}".format(REBOOT_TYPE_WARM))
- pytest_assert(reboot_cause == REBOOT_TYPE_WARM, "Reboot cause {} did not match the trigger - {}".format(reboot_cause, REBOOT_TYPE_WARM))
+ logger.info("Check reboot cause. Expected cause {}".format(upgrade_type))
+ pytest_assert(reboot_cause == upgrade_type, "Reboot cause {} did not match the trigger - {}".format(reboot_cause, upgrade_type))
check_services(duthost)
diff --git a/tests/voq/__init__.py b/tests/voq/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/tests/voq/test_voq_init.py b/tests/voq/test_voq_init.py
new file mode 100644
index 00000000000..17eec6d8c9d
--- /dev/null
+++ b/tests/voq/test_voq_init.py
@@ -0,0 +1,436 @@
+"""Test initialization of VoQ objects, switch, system ports, router interfaces, neighbors, inband port."""
+import json
+import logging
+import pytest
+from tests.common.helpers.assertions import pytest_assert
+
+from tests.common.helpers.redis import AsicDbCli, RedisKeyNotFound
+from tests.common.errors import RunAnsibleModuleFail
+from voq_helpers import check_local_neighbor, check_voq_remote_neighbor, get_sonic_mac, get_neighbor_mac
+from voq_helpers import check_local_neighbor_asicdb, get_device_system_ports, get_inband_info, get_port_by_ip
+from voq_helpers import check_rif_on_sup, check_voq_neighbor_on_sup, find_system_port
+
+pytestmark = [
+ pytest.mark.topology('t2')
+]
+
+logger = logging.getLogger(__name__)
+
+
+@pytest.fixture(scope="module", autouse=True)
+def chassis_facts(duthosts):
+    """
+    Fixture to add some items to host facts from inventory file.
+
+    On chassis setups (at least one supervisor node present), reads
+    /etc/sonic/card_details.json from each node and stores the card's
+    slot number in host.facts['slot_num'] for later system-port lookups.
+    """
+    for a_host in duthosts.nodes:
+
+        # NOTE(review): the supervisor check is loop-invariant, and this
+        # assumes card_details.json exists on every node when a supervisor
+        # is present -- confirm for fixed-form-factor duts.
+        if len(duthosts.supervisor_nodes) > 0:
+            out = a_host.command("cat /etc/sonic/card_details.json")
+            card_details = json.loads(out['stdout'])
+            if 'slot_num' in card_details:
+                a_host.facts['slot_num'] = card_details['slot_num']
+
+
+@pytest.fixture(scope="module")
+def nbrhosts_facts(nbrhosts):
+    """
+    Fixture returning EOS facts for each reachable neighbor VM.
+
+    VMs whose eos_facts call fails (VM down/unreachable) are logged and
+    omitted from the returned dictionary instead of failing the session.
+    """
+    nbrhosts_facts = {}
+    for a_vm in nbrhosts:
+        try:
+            vm_facts = nbrhosts[a_vm]['host'].eos_facts()
+        except RunAnsibleModuleFail:
+            logger.error("VM: %s is down, skipping config fetching.", a_vm)
+            continue
+        logger.debug("vm facts: {}".format(json.dumps(vm_facts, indent=4)))
+        nbrhosts_facts[a_vm] = vm_facts
+    return nbrhosts_facts
+
+
+def test_voq_switch_create(duthosts):
+    """Compare the config facts with the asic db for switch:
+    * Verify ASIC_DB get all system ports referenced in configDB created on all hosts and ASICs.
+    * Verify object creation and values of port attributes.
+    """
+
+    # Track switch_ids across every asic to enforce chassis-wide uniqueness.
+    switch_id_list = []
+    for per_host in duthosts.frontend_nodes:
+
+        for asic in per_host.asics:
+            cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+            dev_facts = cfg_facts['DEVICE_METADATA']['localhost']
+            asicdb = AsicDbCli(asic)
+
+            switchkey = asicdb.get_switch_key()
+            logger.info("Checking switch %s", switchkey)
+            # Map of config_db DEVICE_METADATA fields to ASIC DB switch attributes.
+            check_list = {
+                "max_cores": "SAI_SWITCH_ATTR_MAX_SYSTEM_CORES",
+                "switch_id": "SAI_SWITCH_ATTR_SWITCH_ID"}
+            for k in check_list:
+                asicdb.get_and_check_key_value(switchkey, dev_facts[k], field=check_list[k])
+
+            # Every asic in the VoQ system must have its own switch ID.
+            pytest_assert(dev_facts["switch_id"] not in switch_id_list,
+                          "Switch ID: %s has been used more than once" % dev_facts["switch_id"])
+            switch_id_list.append(dev_facts["switch_id"])
+
+            asicdb.get_and_check_key_value(switchkey, "SAI_SWITCH_TYPE_VOQ", field="SAI_SWITCH_ATTR_TYPE")
+
+
+def test_voq_system_port_create(duthosts):
+ """Compare the config facts with the asic db for system ports
+
+ * Verify ASIC_DB get all system ports referenced in configDB created on all hosts and ASICs.
+ * Verify object creation and values of port attributes.
+
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ logger.info("Checking system ports on host: %s, asic: %s", per_host.hostname, asic.asic_index)
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_ports = get_device_system_ports(cfg_facts)
+ asicdb = AsicDbCli(asic)
+ keylist = asicdb.get_system_port_key_list()
+ pytest_assert(len(keylist) == len(dev_ports.keys()),
+ "Found %d system port keys, %d entries in cfg_facts, not matching" % (
+ len(keylist), len(dev_ports.keys())))
+ logger.info("Found %d system port keys, %d entries in cfg_facts, checking each.",
+ len(keylist), len(dev_ports.keys()))
+ for portkey in keylist:
+ try:
+ port_output = asicdb.hget_key_value(portkey, field="SAI_SYSTEM_PORT_ATTR_CONFIG_INFO")
+ except RedisKeyNotFound:
+ # TODO: Need to check on behavior here.
+ logger.warning("System port: %s had no SAI_SYSTEM_PORT_ATTR_CONFIG_INFO", portkey)
+ continue
+ port_data = json.loads(port_output)
+ for cfg_port in dev_ports:
+ if dev_ports[cfg_port]['system_port_id'] == port_data['port_id']:
+ # "switch_id": "0",
+ # "core_index": "1",
+ # "core_port_index": "6",
+ # "speed": "400000"
+ pytest_assert(dev_ports[cfg_port]['switch_id'] == port_data[
+ 'attached_switch_id'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['core_index'] == port_data[
+ 'attached_core_index'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['core_port_index'] == port_data[
+ 'attached_core_port_index'], "switch IDs do not match for port: %s" % portkey)
+ pytest_assert(dev_ports[cfg_port]['speed'] == port_data[
+ 'speed'], "switch IDs do not match for port: %s" % portkey)
+ break
+ else:
+ logger.error("Could not find config entry for portkey: %s" % portkey)
+
+ logger.info("Host: %s, Asic: %s all ports match all parameters", per_host.hostname, asic.asic_index)
+
+
+def test_voq_local_port_create(duthosts):
+ """Compare the config facts with the asic db for local ports
+
+ * Verify ASIC_DB has host interface information for all local ports on all cards and ASICs.
+ * Verify host interfaces exist on host CLI (ifconfig).
+ * Verify interfaces exist in show interfaces on the linecard.
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_ports = cfg_facts['PORT']
+
+ asicdb = AsicDbCli(asic)
+
+ keylist = asicdb.get_hostif_list()
+ pytest_assert(len(keylist) == len(dev_ports.keys()),
+ "Found %d hostif keys, %d entries in cfg_facts" % (len(keylist), len(dev_ports.keys())))
+ logger.info("Found %s ports to check on host:%s, asic: %s.", len(dev_ports.keys()), per_host.hostname,
+ asic.asic_index)
+
+ show_intf = asic.show_interface(command="status")['ansible_facts']
+ for portkey in keylist:
+ port_name = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_NAME")
+ port_state = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_OPER_STATUS")
+ port_type = asicdb.hget_key_value(portkey, "SAI_HOSTIF_ATTR_TYPE")
+ logger.info("Checking port: %s, state: %s", port_name, port_state)
+ # "SAI_HOSTIF_ATTR_NAME": "Ethernet0",
+ # "SAI_HOSTIF_ATTR_OBJ_ID": "oid:0x1000000000002",
+ # "SAI_HOSTIF_ATTR_OPER_STATUS": "false",
+ # "SAI_HOSTIF_ATTR_TYPE": "SAI_HOSTIF_TYPE_NETDEV"
+ pytest_assert(port_type == "SAI_HOSTIF_TYPE_NETDEV", "Port %s is not type netdev" % portkey)
+ if port_state == "true":
+ pytest_assert(show_intf['int_status'][port_name]['oper_state'] == "up",
+ "Show interface state is down when it should be up")
+ if port_state == "false":
+ pytest_assert(show_intf['int_status'][port_name]['oper_state'] == "down",
+ "Show interface state is up when it should be down")
+
+ if asic.namespace is None:
+ cmd = "sudo ifconfig %s" % port_name
+ else:
+ cmd = "sudo ip netns exec %s ifconfig %s" % (asic.namespace, port_name)
+ ifout = per_host.command(cmd)
+ assert "not found" not in ifout['stdout_lines'][0], "Interface %s not found" % port_name
+ if port_state == "true" and "RUNNING" in ifout['stdout_lines'][0]:
+ logger.debug("Interface state is up and matches")
+ elif port_state == "false" and "RUNNING" not in ifout['stdout_lines'][0]:
+ logger.debug("Interface state is down and matches")
+ else:
+ raise AssertionError("Interface state does not match: %s %s", port_state, ifout['stdout_lines'][0])
+
+
+def test_voq_interface_create(duthosts):
+    """
+    Verify router interfaces are created on all line cards and present in Chassis App Db.
+
+    * Verify router interface creation on local ports in ASIC DB.
+    * PORT_ID should match system port table and traced back to config_db.json, mac and MTU should match as well.
+    * Verify SYSTEM_INTERFACE table in Chassis AppDb (redis-dump -h -p 6380 -d 12 on supervisor).
+    * Verify creation interfaces with different MTUs in configdb.json.
+    * Verify creation of different subnet masks in configdb.json.
+    * Repeat with IPv4, IPv6, dual-stack.
+
+    """
+    for per_host in duthosts.frontend_nodes:
+        logger.info("Check router interfaces on node: %s", per_host.hostname)
+
+        for asic in per_host.asics:
+            cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+            dev_intfs = cfg_facts['INTERFACE']
+            dev_sysports = get_device_system_ports(cfg_facts)
+
+            slot = per_host.facts['slot_num']
+            rif_ports_in_asicdb = []
+
+            # intf_list = get_router_interface_list(dev_intfs)
+            asicdb = AsicDbCli(asic)
+
+            asicdb_intf_key_list = asicdb.get_router_if_list()
+            # Check each rif in the asicdb, if it is local port, check VOQ DB for correct RIF.
+            # If it is on system port, verify slot/asic/port and OID match a RIF in VoQDB
+            for rif in asicdb_intf_key_list:
+                rif_type = asicdb.hget_key_value(rif, "SAI_ROUTER_INTERFACE_ATTR_TYPE")
+                if rif_type != "SAI_ROUTER_INTERFACE_TYPE_PORT":
+                    logger.info("Skip this rif: %s, it is not on a port: %s", rif, rif_type)
+                    continue
+                else:
+                    portid = asicdb.hget_key_value(rif, "SAI_ROUTER_INTERFACE_ATTR_PORT_ID")
+                    logger.info("Process RIF %s, Find port with ID: %s", rif, portid)
+
+                porttype = asicdb.get_rif_porttype(portid)
+                logger.info("RIF: %s is of type: %s", rif, porttype)
+                if porttype == 'hostif':
+                    # find the hostif entry to get the physical port the router interface is on.
+                    hostifkey = asicdb.find_hostif_by_portid(portid)
+                    hostif = asicdb.hget_key_value(hostifkey, 'SAI_HOSTIF_ATTR_NAME')
+                    logger.info("RIF: %s is on local port: %s", rif, hostif)
+                    rif_ports_in_asicdb.append(hostif)
+                    if hostif not in dev_intfs:
+                        pytest.fail("Port: %s has a router interface, but it isn't in configdb." % portid)
+
+                    # check MTU and ethernet address
+                    asicdb.get_and_check_key_value(rif, cfg_facts['PORT'][hostif]['mtu'],
+                                                   field="SAI_ROUTER_INTERFACE_ATTR_MTU")
+                    intf_mac = get_sonic_mac(per_host, asic.asic_index, hostif)
+                    asicdb.get_and_check_key_value(rif, intf_mac, field="SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS")
+
+                    # NOTE(review): assumes rif keys look like
+                    # "ASIC_STATE:<SAI type>:oid:0x..." so split(":")[3] is the
+                    # hex OID; VIDTORID maps it to the system-wide RID -- confirm.
+                    sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+                    sysport_info = find_system_port(dev_sysports, slot, asic.asic_index, hostif)
+                    for sup in duthosts.supervisor_nodes:
+                        check_rif_on_sup(sup, sup_rif, sysport_info['slot'], sysport_info['asic'], hostif)
+
+                elif porttype == 'sysport':
+                    try:
+                        port_output = asicdb.hget_key_value("ASIC_STATE:SAI_OBJECT_TYPE_SYSTEM_PORT:" + portid,
+                                                            field="SAI_SYSTEM_PORT_ATTR_CONFIG_INFO")
+                    except RedisKeyNotFound:
+                        # not a hostif or system port, log error and continue
+                        logger.error("Did not find OID %s in local or system tables" % portid)
+                        continue
+                    port_data = json.loads(port_output)
+                    for cfg_port in dev_sysports:
+                        if dev_sysports[cfg_port]['system_port_id'] == port_data['port_id']:
+                            logger.info("RIF: %s is on remote port: %s", rif, cfg_port)
+                            break
+                    else:
+                        raise AssertionError("Did not find OID %s in local or system tables" % portid)
+
+                    # system port keys are "<slot>|<asic>|<port>"
+                    sys_slot, sys_asic, sys_port = cfg_port.split("|")
+                    sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+                    for sup in duthosts.supervisor_nodes:
+                        check_rif_on_sup(sup, sup_rif, sys_slot, sys_asic, sys_port)
+
+                elif porttype == 'port':
+                    # this is the RIF on the inband port.
+                    inband = get_inband_info(cfg_facts)
+                    logger.info("RIF: %s is on local port: %s", rif, inband['port'])
+
+                    # check MTU and ethernet address
+                    asicdb.get_and_check_key_value(rif, cfg_facts['PORT'][inband['port']]['mtu'],
+                                                   field="SAI_ROUTER_INTERFACE_ATTR_MTU")
+                    intf_mac = get_sonic_mac(per_host, asic.asic_index, inband['port'])
+                    asicdb.get_and_check_key_value(rif, intf_mac, field="SAI_ROUTER_INTERFACE_ATTR_SRC_MAC_ADDRESS")
+
+                    sup_rif = asicdb.hget_key_value("VIDTORID", "oid:" + rif.split(":")[3])
+                    sysport_info = find_system_port(dev_sysports, slot, asic.asic_index, inband['port'])
+                    for sup in duthosts.supervisor_nodes:
+                        check_rif_on_sup(sup, sup_rif, sysport_info['slot'], sysport_info['asic'], inband['port'])
+
+            # Verify each RIF in config had a corresponding local port RIF in the asicDB.
+            for rif in dev_intfs:
+                pytest_assert(rif in rif_ports_in_asicdb, "Interface %s is in configdb.json but not in asicdb" % rif)
+            logger.info("Interfaces %s are present in configdb.json and asicdb" % str(dev_intfs.keys()))
+
+
+def test_voq_neighbor_create(duthosts, nbrhosts, nbrhosts_facts):
+ """
+ Verify neighbor entries are created on linecards for local and remote VMS.
+
+ For local neighbors:
+ * ARP/NDP should be resolved when BGP to adjacent VMs is established.
+ * On local linecard, verify ASIC DB entries.
+ * MAC address matches MAC of neighbor VM.
+ * Router interface OID matches back to the correct interface and port the neighbor was learned on.
+ * On local linecard, verify show arp/ndp, ip neigh commands.
+ * MAC address matches MAC of neighbor VM.
+ * On local linecard. verify neighbor table in appDB.
+ * MAC address matches MAC of neighbor VM.
+ * On supervisor card, verify SYSTEM_NEIGH table in Chassis AppDB (redis-dump -h -p 6380 -d 12 on supervisor).
+ * Verify encap index and MAC address match between ASICDB the Chassis AppDB
+ * Repeat with IPv4, IPv6, dual-stack.
+
+ For remote neighbors:
+ * When local neighbors are established as in the Local Neighbor testcase, corresponding entries will be established
+ on all other line cards. On each remote card, verify:
+ * Verify ASIC DB entries on remote linecards.
+ * Verify impose index=True in ASIC DB.
+ * Verify MAC address in ASIC DB is the remote neighbor mac.
+ * Verify encap index for ASIC DB entry matches Chassis App DB.
+ * Verify router interface OID matches the interface the neighbor was learned on.
+ * Verify on linecard CLI, show arp/ndp, ip neigh commands.
+ * For inband port, MAC should be inband port mac in kernel table and LC appDb.
+ * For inband vlan mode, MAC will be remote ASIC mac in kernel table and LC appdb.
+ * Verify neighbor table in linecard appdb.
+ * Verify static route is installed in kernel routing table with /32 (or /128 for IPv6) for neighbor entry.
+ * Repeat with IPv4, IPv6, dual-stack.
+
+ """
+
+ for per_host in duthosts.frontend_nodes:
+
+ for asic in per_host.asics:
+ logger.info("Checking local neighbors on host: %s, asic: %s", per_host.hostname, asic.asic_index)
+ cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+ dev_sysports = get_device_system_ports(cfg_facts)
+ neighs = cfg_facts['BGP_NEIGHBOR']
+ inband_info = get_inband_info(cfg_facts)
+
+ # Check each neighbor in table
+ for neighbor in neighs:
+ local_ip = neighs[neighbor]['local_addr']
+ if local_ip == inband_info['ipv4_addr'] or local_ip == inband_info['ipv6_addr']:
+ # skip inband neighbors
+ continue
+
+ # Check neighbor on local linecard
+ local_port = get_port_by_ip(cfg_facts, local_ip)
+ show_intf = asic.show_interface(command="status")['ansible_facts']
+ if local_port is None:
+ logger.error("Did not find port for this neighbor %s, must skip", local_ip)
+ continue
+ elif "portchannel" in local_port.lower():
+ # TODO: LAG support
+ logger.info("Port channel is not supported yet by this test, skip port: %s", local_port)
+ continue
+ if show_intf['int_status'][local_port]['oper_state'] == "down":
+ logger.error("Port is down, must skip interface: %s, IP: %s", local_port, local_ip)
+ continue
+
+ neigh_mac = get_neighbor_mac(neighbor, nbrhosts, nbrhosts_facts)
+ if neigh_mac is None:
+ logger.error("Could not find neighbor MAC, must skip. IP: %s, port: %s", local_ip, local_port)
+
+ local_dict = check_local_neighbor(per_host, asic, neighbor, neigh_mac, local_port)
+ logger.info("Local_dict: %s", local_dict)
+
+ # Check the same neighbor entry on the supervisor nodes
+ sysport_info = find_system_port(dev_sysports, per_host.facts['slot_num'], asic.asic_index, local_port)
+ for sup in duthosts.supervisor_nodes:
+ check_voq_neighbor_on_sup(sup, sysport_info['slot'], sysport_info['asic'], local_port,
+ neighbor, local_dict['encap_index'], neigh_mac)
+
+ # Check the neighbor entry on each remote linecard
+ for rem_host in duthosts.frontend_nodes:
+
+ for rem_asic in rem_host.asics:
+ if rem_host == per_host and rem_asic == asic:
+ # skip remote check on local host
+ continue
+ rem_cfg_facts = rem_asic.config_facts(source="persistent")['ansible_facts']
+ remote_inband_info = get_inband_info(rem_cfg_facts)
+ remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])
+ check_voq_remote_neighbor(rem_host, rem_asic, neighbor, neigh_mac, remote_inband_info['port'],
+ local_dict['encap_index'], remote_inband_mac)
+
+
+def test_voq_inband_port_create(duthosts):
+    """
+    Test inband port creation.
+
+    These steps are covered by previous test cases:
+    * On each linecard, verify inband ports are present in ASICDB.
+    * On each linecard, verify inband router interfaces are present in ASICDB
+    * On supervisor card, verify inband router interfaces are present in Chassis App DB
+
+    This test function will cover:
+    * On each linecard, verify permanent neighbors for all inband ports.
+    * On each linecard, verify kernel routes for all inband ports.
+    * Repeat with IPv4, IPv6, dual-stack.
+
+
+    """
+    for per_host in duthosts.frontend_nodes:
+
+        for asic in per_host.asics:
+            cfg_facts = asic.config_facts(source="persistent")['ansible_facts']
+            dev_sysports = get_device_system_ports(cfg_facts)
+            inband_info = get_inband_info(cfg_facts)
+            inband_mac = get_sonic_mac(per_host, asic.asic_index, inband_info['port'])
+
+            # Collect whichever address families are configured on the inband port.
+            inband_ips = []
+            if 'ipv6_addr' in inband_info:
+                inband_ips.append(inband_info['ipv6_addr'])
+            if 'ipv4_addr' in inband_info:
+                inband_ips.append(inband_info['ipv4_addr'])
+
+            # The inband port's own addresses appear as permanent local neighbors.
+            for neighbor_ip in inband_ips:
+
+                host = per_host
+                neighbor_mac = inband_mac
+                interface = inband_info['port']
+
+                logger.info("Check local neighbor on host %s, asic %s for %s/%s via port: %s", host.hostname,
+                            str(asic.asic_index),
+                            neighbor_ip, neighbor_mac, interface)
+
+                asic_dict = check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac)
+                encap_idx = asic_dict['encap_index']
+
+                # Check the inband neighbor entry on the supervisor nodes
+                sysport_info = find_system_port(dev_sysports, per_host.facts['slot_num'], asic.asic_index, interface)
+                for sup in duthosts.supervisor_nodes:
+                    check_voq_neighbor_on_sup(sup, sysport_info['slot'], sysport_info['asic'], interface, neighbor_ip,
+                                              encap_idx, inband_mac)
+
+                # Check the neighbor entry on each remote linecard
+                for rem_host in duthosts.frontend_nodes:
+
+                    for rem_asic in rem_host.asics:
+                        if rem_host == per_host and rem_asic == asic:
+                            # skip remote check on local host
+                            continue
+                        rem_cfg_facts = rem_asic.config_facts(source="persistent")['ansible_facts']
+                        remote_inband_info = get_inband_info(rem_cfg_facts)
+                        remote_inband_mac = get_sonic_mac(rem_host, rem_asic.asic_index, remote_inband_info['port'])
+                        check_voq_remote_neighbor(rem_host, rem_asic, neighbor_ip, inband_mac,
+                                                  remote_inband_info['port'],
+                                                  encap_idx, remote_inband_mac)
diff --git a/tests/voq/voq_helpers.py b/tests/voq/voq_helpers.py
new file mode 100644
index 00000000000..c2d0b3a8268
--- /dev/null
+++ b/tests/voq/voq_helpers.py
@@ -0,0 +1,484 @@
+import json
+import logging
+import re
+from tests.common.helpers.assertions import pytest_assert
+from tests.common.helpers.redis import AsicDbCli, AppDbCli, VoqDbCli
+
+logger = logging.getLogger(__name__)
+
+
+def check_host_arp_table(host, neighbor_ip, neighbor_mac, interface, state):
+    """
+    Validates the ARP table of a host by running ip neigh for a single neighbor.
+
+    Args:
+        host: instance of SonicHost to run the arp show.
+        neighbor_ip: IP address of the neighbor to verify.
+        neighbor_mac: MAC address expected in the show command output.
+        interface: Port expected in the show command output.
+        state: ARP entry state expected in the show command output.
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+
+    """
+    arptable = host.switch_arptable()['ansible_facts']
+    logger.debug("ARP: %s", arptable)
+    # Select the NDP (v6) or ARP (v4) table based on the address family.
+    if ':' in neighbor_ip:
+        table = arptable['arptable']['v6']
+    else:
+        table = arptable['arptable']['v4']
+    pytest_assert(neighbor_ip in table, "IP %s not in arp list: %s" % (neighbor_ip, table.keys()))
+    pytest_assert(table[neighbor_ip]['macaddress'] == neighbor_mac,
+                  "table MAC %s does not match neighbor mac: %s" % (table[neighbor_ip]['macaddress'], neighbor_mac))
+    pytest_assert(table[neighbor_ip]['interface'] == interface,
+                  "table interface %s does not match interface: %s" % (table[neighbor_ip]['interface'], interface))
+    pytest_assert(table[neighbor_ip]['state'].lower() == state.lower(),
+                  "table state %s is not %s" % (table[neighbor_ip]['state'].lower(), state.lower()))
+
+
+def check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac):
+    """
+    Verifies the neighbor information of a sonic host in the asicdb for a locally attached neighbor.
+
+    Args:
+        asic: The SonicAsic instance to be checked.
+        neighbor_ip: The IP address of the neighbor.
+        neighbor_mac: The MAC address of the neighbor.
+
+    Returns:
+        A dictionary with the encap ID from the ASIC neighbor table.
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+
+    """
+    asicdb = AsicDbCli(asic)
+    neighbor_key = asicdb.get_neighbor_key_by_ip(neighbor_ip)
+    pytest_assert(neighbor_key is not None, "Did not find neighbor in asictable for IP: %s" % neighbor_ip)
+    # MAC case can differ between sources; compare case-insensitively.
+    asic_mac = asicdb.get_neighbor_value(neighbor_key, 'SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS')
+    pytest_assert(asic_mac.lower() == neighbor_mac.lower(),
+                  "MAC does not match in asicDB, asic %s, device %s" % (asic_mac.lower(), neighbor_mac.lower()))
+    encap_idx = asicdb.get_neighbor_value(neighbor_key, 'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX')
+    return {"encap_index": encap_idx}
+
+
+def check_local_neighbor(host, asic, neighbor_ip, neighbor_mac, interface):
+    """
+    Verifies the neighbor information of a sonic host for a locally attached neighbor.
+
+    The ASIC DB, APP DB, and host ARP table are checked.
+
+    Args:
+        host: Instance of SonicHost to check.
+        asic: Instance of SonicAsic to check.
+        neighbor_ip: IP address if the neighbor to check.
+        neighbor_mac: Expected ethernet MAC address of the neighbor.
+        interface: Expected interface the neighbor was learned on.
+
+    Returns:
+        A dictionary with the key into the LC APP DB neighbor table and the encap ID from the ASIC DB neighbor table.
+        {'encap_index': u'1074790408',
+         'neighbor_key': u'NEIGH_TABLE:Ethernet10:2064:103::1'}
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+
+    """
+    logger.info("Check local neighbor on host %s, asic %s for %s/%s via port: %s", host.hostname, str(asic.asic_index),
+                neighbor_ip, neighbor_mac, interface)
+
+    # verify asic db
+    asic_dict = check_local_neighbor_asicdb(asic, neighbor_ip, neighbor_mac)
+
+    # verify LC appdb
+    appdb = AppDbCli(asic)
+    neighbor_key = appdb.get_neighbor_key_by_ip(neighbor_ip)
+    appdb.get_and_check_key_value(neighbor_key, neighbor_mac, field="neigh")
+    # APP DB key format is NEIGH_TABLE:<interface>:<ip>, so the learned
+    # interface must appear between colons.
+    pytest_assert(":{}:".format(interface) in neighbor_key, "Port for %s does not match" % neighbor_key)
+
+    # verify linux arp table
+    check_host_arp_table(host, neighbor_ip, neighbor_mac, interface, 'REACHABLE')
+
+    return {'neighbor_key': neighbor_key, 'encap_index': asic_dict['encap_index']}
+
+
+def check_bgp_kernel_route(host, asicnum, prefix, ipver, interface, present=True):
+ """
+ Checks the kernel route is installed from the bgp container.
+
+ Args:
+ host: sonic duthost instance to check.
+ asicnum: asic index to check.
+ prefix: IP address plus mask to check in routing table.
+ ipver: ip or ipv6.
+ interface: Attached interface for the neighbor route.
+ present: Optional; Check whether route is installed or removed.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ docker = "bgp"
+ if host.facts["num_asic"] > 1:
+ docker = "bgp" + str(asicnum)
+
+ output = host.command("docker exec " + docker + " vtysh -c \"show {} route {} json\"".format(ipver, prefix))
+ parsed = json.loads(output["stdout"])
+ if present is True:
+ pytest_assert(prefix in parsed.keys(), "Prefix: %s not in route list: %s" % (prefix, parsed.keys()))
+ for route in parsed[prefix]:
+ if route['distance'] != 0:
+ found = False
+ continue
+ pytest_assert(route['protocol'] == "kernel", "Prefix: %s not kernel route" % prefix)
+ pytest_assert(route['nexthops'][0]['directlyConnected'] is True,
+ "Prefix: %s not directly connected" % prefix)
+ pytest_assert(route['nexthops'][0]['active'] is True, "Prefix: %s not active" % prefix)
+ pytest_assert(route['nexthops'][0]['interfaceName'] == interface,
+ "Prefix: %s out interface is not correct" % prefix)
+
+ found = True
+ break
+ pytest_assert(found, "Kernel route is not present in bgp output: %s" % parsed[prefix])
+ logger.info("Route %s is present in remote neighbor: %s/%s", prefix, host.hostname, str(asicnum))
+
+
+def check_host_kernel_route(host, asicnum, ipaddr, ipver, interface, present=True):
+ """
+ Checks the kernel route on the host OS.
+
+ Args:
+ host: sonic duthost instance to check.
+ asicnum: asic index to check.
+ ipaddr: IP address to check in routing table.
+ ipver: ip or ipv6.
+ interface: Attached interface for the neighbor route.
+ present: Optional; Check whether route is installed or removed.
+
+ Raises:
+ Pytest Failed exception when assertions fail.
+
+ """
+ ver = "-4" if ipver == "ip" else "-6"
+ if host.facts["num_asic"] == 1:
+ cmd = "ip {} route show exact {}".format(ver, ipaddr)
+ else:
+ cmd = "ip netns exec asic{} ip {} route show exact {}".format(asicnum, ver, ipaddr)
+ logger.debug("Kernel rt cmd: %s", cmd)
+ output = host.command(cmd)['stdout']
+ if present is True:
+ logger.info("host ip route output: %s", output)
+ pytest_assert(output.startswith(ipaddr), "Address: %s not in netstat output list: %s" % (ipaddr, output))
+ pytest_assert("dev %s" % interface in output, "Interface is not %s: %s" % (interface, output))
+
+
+def check_neighbor_kernel_route(host, asicnum, ipaddr, interface, present=True):
+    """
+    Verifies if a neighbor kernel route is installed or not.
+
+    Checks BGP docker and linux kernel route tables.
+
+    Args:
+        host: sonic duthost instance to check.
+        asicnum: asic index to check.
+        ipaddr: IP address to check in routing table.  Mask will be applied by this function.
+        interface: Attached interface for the neighbor route.
+        present: Optional; Check whether route is installed or removed.
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+
+    """
+    # Neighbor entries are host routes: /128 for IPv6, /32 for IPv4.
+    if ":" in ipaddr:
+        ipver = "ipv6"
+        prefix = ipaddr + "/128"
+    else:
+        ipver = "ip"
+        prefix = ipaddr + "/32"
+    check_bgp_kernel_route(host, asicnum, prefix, ipver, interface, present)
+    check_host_kernel_route(host, asicnum, ipaddr, ipver, interface, present)
+
+
+def check_voq_remote_neighbor(host, asic, neighbor_ip, neighbor_mac, interface, encap_idx, inband_mac):
+    """
+    Verifies the neighbor information of a neighbor learned on a different host.
+
+    The ASIC DB, APP DB, and host ARP table are checked. The host kernel route is verified. The encap ID from the
+    local neighbor is provided as a parameter and verified that it is imposed.
+
+    Args:
+        host: Instance of SonicHost to check.
+        asic: Instance of SonicAsic to check.
+        neighbor_ip: IP address if the neighbor to check.
+        neighbor_mac: Expected ethernet MAC address of the neighbor.
+        interface: Expected interface the neighbor was learned on.
+        encap_idx: The encap index from the SONIC host the neighbor is directly attached to.
+        inband_mac: The MAC of the inband port of the remote host.
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+    """
+    logger.info("Check remote neighbor on host %s, asic: %s for %s/%s via port: %s", host.hostname,
+                str(asic.asic_index), neighbor_ip, neighbor_mac, interface)
+
+    # asic db: remote entries carry the real neighbor MAC with the encap
+    # index imposed and is_local false.
+    asicdb = AsicDbCli(asic)
+    neighbor_key = asicdb.get_neighbor_key_by_ip(neighbor_ip)
+    pytest_assert(neighbor_key is not None, "Did not find neighbor in asic table for IP: %s" % neighbor_ip)
+    pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+                                            'SAI_NEIGHBOR_ENTRY_ATTR_DST_MAC_ADDRESS').lower() == neighbor_mac.lower(),
+                  "MAC does not match in asicDB")
+    pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+                                            'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_INDEX') == encap_idx,
+                  "Encap index does not match in asicDB")
+    pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+                                            'SAI_NEIGHBOR_ENTRY_ATTR_ENCAP_IMPOSE_INDEX') == "true",
+                  "Encap impose is not true in asicDB")
+    pytest_assert(asicdb.get_neighbor_value(neighbor_key,
+                                            'SAI_NEIGHBOR_ENTRY_ATTR_IS_LOCAL') == "false",
+                  "is local is not false in asicDB")
+
+    # LC app db: the kernel-facing entry uses the remote inband port MAC.
+    appdb = AppDbCli(asic)
+    neighbor_key = appdb.get_neighbor_key_by_ip(neighbor_ip)
+    pytest_assert(":{}:".format(interface) in neighbor_key, "Port for %s does not match" % neighbor_key)
+    appdb.get_and_check_key_value(neighbor_key, inband_mac, field="neigh")
+
+    # verify linux arp table
+    check_host_arp_table(host, neighbor_ip, inband_mac, interface, 'PERMANENT')
+
+    # verify linux route entry
+    check_neighbor_kernel_route(host, asic.asic_index, neighbor_ip, interface)
+
+
+def check_rif_on_sup(sup, rif, slot, asic, port):
+    """
+    Checks the router interface entry on the supervisor card.
+
+    Args:
+        sup: duthost for the supervisor card
+        rif: OID of the router interface to check for.
+        slot: The slot number the router interface is on.
+        asic: The asic number the asic is on, or 0 if a single asic card.
+        port: the name of the port (Ethernet1)
+
+    Note:
+        NOTE(review): a mismatch is only logged at error level and does not
+        fail the test -- confirm whether this should assert.
+
+    """
+    voqdb = VoqDbCli(sup)
+
+    rif_oid = voqdb.get_router_interface_id(slot, asic, port)
+
+    if rif_oid == rif:
+        logger.info("RIF on sup: %s = %s", rif_oid, rif)
+    elif rif_oid[-10:-1] == rif[-10:-1]:
+        # NOTE(review): compares a slice of the OID strings -- presumably to
+        # tolerate differing object-type prefixes; verify the intent.
+        logger.warning("RIF on sup is a partial match: %s != %s", rif_oid, rif)
+    else:
+        logger.error("RIF on sup does not match: %s != %s" % (rif_oid, rif))
+
+
+def check_voq_neighbor_on_sup(sup, slot, asic, port, neighbor, encap_index, mac):
+    """
+    Checks the neighbor entry on the supervisor card.
+
+    Args:
+        sup: duthost for the supervisor card
+        slot: The slot the router interface is on, as in system port table (Slot2).
+        asic: The asic the router interface is on, as in the system port table (Asic0) .
+        port: the name of the port (Ethernet1)
+        neighbor: The IP of the neighbor
+        encap_index: The encap ID of the neighbor from the local asic db
+        mac: The MAC address of the neighbor
+
+    Raises:
+        Pytest Failed exception when assertions fail.
+
+    """
+    voqdb = VoqDbCli(sup)
+    neigh_key = voqdb.get_neighbor_key_by_ip(neighbor)
+    logger.info("Neigh key: %s, slotnum: %s", neigh_key, slot)
+    # The key is pipe-delimited with slot, asic and "<port>:<ip>" components;
+    # each expected component must appear between the delimiters.
+    pytest_assert("|%s|" % slot in neigh_key,
+                  "Slot for %s does not match %s" % (neigh_key, slot))
+    pytest_assert("|%s:" % port in neigh_key,
+                  "Port for %s does not match %s" % (neigh_key, port))
+    pytest_assert("|%s|" % asic in neigh_key,
+                  "Asic for %s does not match %s" % (neigh_key, asic))
+
+    voqdb.get_and_check_key_value(neigh_key, mac, field="neigh")
+    voqdb.get_and_check_key_value(neigh_key, encap_index, field="encap_index")
+
+
+def get_neighbor_mac(neigh_ip, nbrhosts, nbrhosts_facts):
+    """
+    Gets the MAC address of a neighbor IP on an EOS host.
+
+    We need to get the MAC of the VM out of the linux shell, not from the EOS CLI. The MAC used for punt/inject
+    on the EOS seems to be the linux one. Find the interface name on the VM that is associated with the IP address,
+    then look on the linux OS shell for the MAC address of that interface.
+
+    Args:
+        neigh_ip: The IP address of the neighbor.
+        nbrhosts: dictionary provided by the nbrhosts fixture.
+        nbrhosts_facts: dictionary of ansible facts per VM, keyed by VM name.
+
+    Returns:
+        A string with the MAC address, or None if the IP was not found on any VM.
+    """
+    nbr_vm = ""
+    nbr_intf = ""
+
+    for a_vm in nbrhosts_facts:
+
+        intfs = nbrhosts_facts[a_vm]['ansible_facts']['ansible_net_interfaces']
+        for intf in intfs:
+            # The ipv4 block may be an empty dict; the ipv6 block may be absent
+            # or lack an 'address' key, so use .get() to avoid KeyError.
+            if intfs[intf]['ipv4'] != {} and intfs[intf]['ipv4'].get('address') == neigh_ip:
+                nbr_vm = a_vm
+                nbr_intf = intf
+                break
+            if 'ipv6' in intfs[intf] and intfs[intf]['ipv6'].get('address', '').lower() == neigh_ip.lower():
+                nbr_vm = a_vm
+                nbr_intf = intf
+                break
+        if nbr_vm != "":
+            break
+    else:
+        logger.error("Could not find port for neighbor IP: %s", neigh_ip)
+        logger.info("vm facts: {}".format(json.dumps(nbrhosts_facts, indent=4)))
+        return None
+    # Convert the EOS name to the linux one, e.g. Ethernet12 => eth12.  Keep the
+    # full numeric suffix (not just the last character) so multi-digit interfaces
+    # such as Ethernet10+ map to the correct linux device.
+    shell_intf = "eth" + "".join(ch for ch in nbr_intf if ch.isdigit())
+    nbrhosts[nbr_vm]['host'].eos_command(commands=["enable"])
+    output = nbrhosts[nbr_vm]['host'].eos_command(commands=["bash ip addr show dev %s" % shell_intf])
+    # 8: Ethernet0: mtu 9100 ...
+    # link/ether a6:69:05:fd:da:5f brd ff:ff:ff:ff:ff:ff
+    mac = output['stdout_lines'][0][1].split()[1]
+    logger.info("mac: %s", mac)
+    return mac
+
+
+def get_sonic_mac(host, asicnum, port):
+    """Gets the MAC address of a SONIC port.
+
+    Args:
+        host: a duthost instance
+        asicnum: The asic number to run on, or empty string (single-asic).
+        port: The name of the port to get the MAC
+
+    Returns:
+        A string with the MAC address.
+    """
+    if host.facts["num_asic"] == 1:
+        # Single asic: the port lives in the default network namespace.
+        cmd = "sudo ip link show {}".format(port)
+    else:
+        # Multi-asic: the port lives in its per-asic network namespace.
+        ns = "asic" + str(asicnum)
+        cmd = "sudo ip netns exec {} ip link show {}".format(ns, port)
+    output = host.command(cmd)
+    # Second line of "ip link show" is "link/ether <mac> brd ..."; take token 2.
+    mac = output['stdout_lines'][1].split()[1]
+    # %s rather than %d: asicnum may be an empty string on single-asic systems,
+    # and %d would raise TypeError in that case.
+    logger.info("host: %s, asic: %s, port: %s, mac: %s", host.hostname, asicnum, port, mac)
+    return mac
+
+
+def get_device_system_ports(cfg_facts):
+    """Returns the system ports from the config facts as a single dictionary, instead of a nested dictionary.
+
+    The ansible config_facts module splits keys of the form "part1|part2|part3" into a two-level nested
+    dictionary keyed on "part1".  This helper re-joins the two levels so callers can look up a system
+    port by its full "slot|port" key, exactly as it appears in the config files.
+
+    Args:
+        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.
+
+    Returns:
+        The system port config facts in a single layer dictionary.
+
+    """
+
+    nested = cfg_facts['SYSTEM_PORT']
+    # Rebuild the flat "slot|port" keys from the two nested levels.
+    return {"%s|%s" % (slot, port): entry
+            for slot, ports in nested.items()
+            for port, entry in ports.items()}
+
+
+def get_inband_info(cfg_facts):
+    """
+    Returns the inband port and IP addresses present in the configdb.json.
+
+    Args:
+        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.
+
+    Returns:
+        A dictionary with the inband port and IP addresses.
+    """
+
+    inband_intfs = cfg_facts['VOQ_INBAND_INTERFACE']
+    info = {}
+    for port_name in inband_intfs:
+        for cidr in inband_intfs[port_name]:
+            info['port'] = port_name
+            # Each entry is "address/prefixlen"; sort into the v4 or v6 slot.
+            parts = cidr.split('/')
+            if ':' in parts[0]:
+                info['ipv6_addr'] = parts[0]
+                info['ipv6_mask'] = parts[1]
+            else:
+                info['ipv4_addr'] = parts[0]
+                info['ipv4_mask'] = parts[1]
+    return info
+
+
+def get_port_by_ip(cfg_facts, ipaddr):
+    """
+    Returns the port which has a given IP address from the dut config.
+
+    Args:
+        cfg_facts: The "ansible_facts" output from the duthost "config_facts" module.
+        ipaddr: The IP address to search for.
+
+    Returns:
+        A string with the port name. ("Ethernet12")
+
+    Raises:
+        Exception: If no port on the DUT carries the requested IP address.
+    """
+    want_v6 = ':' in ipaddr
+
+    # Search both routed front-panel ports and portchannel interfaces.
+    intf = {}
+    intf.update(cfg_facts['INTERFACE'])
+    if "PORTCHANNEL_INTERFACE" in cfg_facts:
+        intf.update(cfg_facts['PORTCHANNEL_INTERFACE'])
+    for a_intf in intf:
+        for addrs in intf[a_intf]:
+            # Each key is "address/prefixlen"; compare the address part only.
+            addr = addrs.split('/')[0]
+            if want_v6 and ':' in addr and addr.lower() == ipaddr.lower():
+                return a_intf
+            elif not want_v6 and ':' not in addr and addr == ipaddr:
+                return a_intf
+
+    raise Exception("Did not find port for IP %s" % ipaddr)
+
+
+def find_system_port(dev_sysports, slot, asic_index, hostif):
+    """
+    System key string can be arbitrary text with slot, asic, and port, so try to find the match
+    and return the correct string. ex. "Slot1|asic3|Ethernet12" or "Linecard4|Asic1|Portchannel23"
+
+    Args:
+        dev_sysports: dictionary from config_facts with all of the system ports on the system.
+        slot: The slot number of the system port to find.
+        asic_index: The asic number of the system port to find.
+        hostif: The interface of the system port to find.
+
+    Returns:
+        A dictionary with the system port text strings.
+
+    Raises:
+        KeyError if the system port can't be found in the dictionary.
+
+    """
+
+    # Escape the interface name and anchor the pattern at the end so that,
+    # e.g., hostif "Ethernet1" does not match the key "Slot1|asic0|Ethernet12".
+    sys_re = re.compile(r'([a-zA-Z]+{})\|([a-zA-Z]+{})\|{}$'.format(slot, asic_index, re.escape(hostif)))
+    sys_info = {}
+
+    for sysport in dev_sysports:
+        match = sys_re.match(sysport)
+        if match:
+            sys_info['slot'] = match.group(1)
+            sys_info['asic'] = match.group(2)
+            sys_info['key'] = sysport
+            return sys_info
+
+    raise KeyError("Could not find system port for {}/{}/{}".format(slot, asic_index, hostif))
diff --git a/tests/vrf/test_vrf.py b/tests/vrf/test_vrf.py
index 5d220ebf062..40c3272d37d 100644
--- a/tests/vrf/test_vrf.py
+++ b/tests/vrf/test_vrf.py
@@ -32,7 +32,7 @@
"""
pytestmark = [
- pytest.mark.topology('any')
+ pytest.mark.topology('t0')
]
logger = logging.getLogger(__name__)
@@ -534,7 +534,7 @@ def test_show_bgp_summary(self, duthosts, rand_one_dut_hostname, cfg_facts):
for info in bgp_summary:
for peer, attr in bgp_summary[info]['peers'].iteritems():
- prefix_count = attr['prefixReceivedCount']
+ prefix_count = attr['pfxRcd']
# skip ipv6 peers under 'ipv4Unicast' and compare only ipv4 peers under 'ipv4Unicast', and ipv6 peers under 'ipv6Unicast'
if info == "ipv4Unicast" and attr['idType'] == 'ipv6':
continue
@@ -886,7 +886,7 @@ def test_bgp_with_loopback(self, duthosts, rand_one_dut_hostname, cfg_facts):
assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['state'] == 'Established', \
"Bgp peer {} should be Established!".format(ptf_speaker_ip.ip)
# Verify accepted prefixes of the dynamic neighbors are correct
- assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['prefixReceivedCount'] == 1
+ assert bgp_info['ipv4Unicast']['peers'][str(ptf_speaker_ip.ip)]['pfxRcd'] == 1
class TestVrfWarmReboot():
diff --git a/tests/vxlan/test_vxlan_decap.py b/tests/vxlan/test_vxlan_decap.py
index 0cdc3a14a59..7a9f757ca02 100644
--- a/tests/vxlan/test_vxlan_decap.py
+++ b/tests/vxlan/test_vxlan_decap.py
@@ -149,6 +149,8 @@ def vxlan_status(setup, request, duthosts, rand_one_dut_hostname):
def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfhost, creds):
duthost = duthosts[rand_one_dut_hostname]
+ sonic_admin_alt_password = duthost.host.options['variable_manager']._hostvars[duthost.hostname].get("ansible_altpassword")
+
vxlan_enabled, scenario = vxlan_status
logger.info("vxlan_enabled=%s, scenario=%s" % (vxlan_enabled, scenario))
log_file = "/tmp/vxlan-decap.Vxlan.{}.{}.log".format(scenario, datetime.now().strftime('%Y-%m-%d-%H:%M:%S'))
@@ -161,6 +163,7 @@ def test_vxlan_decap(setup, vxlan_status, duthosts, rand_one_dut_hostname, ptfho
"count": COUNT,
"sonic_admin_user": creds.get('sonicadmin_user'),
"sonic_admin_password": creds.get('sonicadmin_password'),
- "dut_host": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']},
+ "sonic_admin_alt_password": sonic_admin_alt_password,
+ "dut_hostname": duthost.host.options['inventory_manager'].get_host(duthost.hostname).vars['ansible_host']},
qlen=10000,
log_file=log_file)