diff --git a/tests/common/port_toggle.py b/tests/common/port_toggle.py index e8bfd2fa2ea..d1ebf2fa35b 100644 --- a/tests/common/port_toggle.py +++ b/tests/common/port_toggle.py @@ -1,48 +1,70 @@ +""" +Tool used for shutdown/startup port on the DUT. +""" + import time import logging import pprint +from tests.common.helpers.assertions import pytest_assert +from tests.platform_tests.link_flap.link_flap_utils import watch_system_status +from tests.common.utilities import wait_until + + logger = logging.getLogger(__name__) -def port_toggle(duthost, ports=None, wait=60, wait_after_ports_up=60): +def port_toggle(duthost, ports=None, wait=60, wait_after_ports_up=60, watch=False): """ - Toggle ports on DUT - :param duthost: DUT host object - :param ports: specify list of ports, None if toggle all ports - :param wait: time to wait for interface to become up - :param wait_after_ports_up: time to wait after interfaces become up - :return: + Toggle ports on DUT. + + Args: + duthost: DUT host object + ports: Specify list of ports, None if toggle all ports + wait: Time to wait for interface to become up + wait_after_ports_up: Time to wait after interfaces become up + watch: Logging system state """ + def __check_interface_state(state='up'): + """ + Check interfaces status + + Args: + state: state of DUT's interface + """ + ports_down = duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports'] + + if 'down' in state: + return len(ports_down) == len(ports) + else: + return len(ports_down) == 0 + if ports is None: logger.debug('ports is None, toggling all minigraph ports') mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] ports = mg_facts['minigraph_ports'].keys() - logger.info('toggling ports:\n{}'.format(pprint.pformat(ports))) + logger.info('toggling ports:\n%s', pprint.pformat(ports)) for port in ports: duthost.command('config interface shutdown {}'.format(port)) + if watch: + time.sleep(1) + 
watch_system_status(duthost) - # verify all interfaces are up - ports_down = duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports'] - assert len(ports_down) == len(ports) + # verify all interfaces are down + pytest_assert(wait_until(3, 1, __check_interface_state, 'down'), + "dut ports {} didn't go down as expected" + .format(list(set(ports).difference(set(duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports']))))) for port in ports: duthost.command('config interface startup {}'.format(port)) logger.info('waiting for ports to become up') - start = time.time() - ports_down = duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports'] - while time.time() - start < wait: - ports_down = duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports'] - logger.info('retry, down ports:\n{}'.format(pprint.pformat(ports_down))) - if len(ports_down) == 0: - break - - assert len(ports_down) == 0 + pytest_assert(wait_until(wait, 1, __check_interface_state), + "dut ports {} didn't go up as expected".format(duthost.interface_facts(up_ports=ports)['ansible_facts']['ansible_interface_link_down_ports'])) - logger.info('wait {} seconds for system to startup'.format(wait_after_ports_up)) + logger.info('wait %d seconds for system to startup', wait_after_ports_up) time.sleep(wait_after_ports_up) diff --git a/tests/platform_tests/conftest.py b/tests/platform_tests/conftest.py index d1ab4b8d9a8..e8b4b88e209 100644 --- a/tests/platform_tests/conftest.py +++ b/tests/platform_tests/conftest.py @@ -1,7 +1,6 @@ import pytest from tests.common.fixtures.advanced_reboot import get_advanced_reboot -from .args.normal_reboot_args import add_normal_reboot_args from .args.advanced_reboot_args import add_advanced_reboot_args from .args.cont_warm_reboot_args import add_cont_warm_reboot_args from .args.normal_reboot_args import add_normal_reboot_args @@ -14,7 
+13,25 @@ def skip_on_simx(duthost): pytest.skip('skipped on this platform: {}'.format(platform)) -# Platform pytest arguments +@pytest.fixture() +def bring_up_dut_interfaces(request, duthost): + """ + Bring up outer interfaces on the DUT. + + Args: + request: pytest request object + duthost: Fixture for interacting with the DUT. + """ + yield + if request.node.rep_call.failed: + mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts'] + ports = mg_facts['minigraph_ports'].keys() + + # Enable outer interfaces + for port in ports: + duthost.no_shutdown(ifname=port) + + def pytest_addoption(parser): add_advanced_reboot_args(parser) add_cont_warm_reboot_args(parser) diff --git a/tests/platform_tests/link_flap/__init__.py b/tests/platform_tests/link_flap/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/platform_tests/link_flap/conftest.py b/tests/platform_tests/link_flap/conftest.py new file mode 100644 index 00000000000..d6341065ddd --- /dev/null +++ b/tests/platform_tests/link_flap/conftest.py @@ -0,0 +1,48 @@ +""" +Pytest configuration used by the link flap tests. + +Teardowns used by the link flap tests. +""" + +import time + +import pytest + +from tests.platform_tests.link_flap.link_flap_utils import build_test_candidates +from tests.common.helpers.dut_ports import decode_dut_port_name + +def pytest_addoption(parser): + """ + Adds options to pytest that are used by the Link flap tests. + """ + + parser.addoption( + "--orch_cpu_threshold", + action="store", + type=int, + default=10, + help="Orchagent CPU threshold", + ) + + +@pytest.fixture() +def bring_up_fanout_interfaces(request, all_ports, duthosts, fanouthosts): + """ + Bring up outer interfaces on the DUT. + + Args: + request: pytest request object + duthost: Fixture for interacting with the DUT. + fanouthosts: Fixture for interacting with the fanouts. 
+ """ + yield + if request.node.rep_call.failed: + dutname, portname = decode_dut_port_name(all_ports) + + for dut in duthosts: + if dutname == 'unknown' or dutname == dut.hostname: + candidates = build_test_candidates(dut, fanouthosts, portname) + for _, fanout, fanout_port in candidates: + fanout.no_shutdown(fanout_port) + + time.sleep(60) diff --git a/tests/platform_tests/link_flap/link_flap_utils.py b/tests/platform_tests/link_flap/link_flap_utils.py new file mode 100644 index 00000000000..d1a8fdc8256 --- /dev/null +++ b/tests/platform_tests/link_flap/link_flap_utils.py @@ -0,0 +1,188 @@ +""" +Test utils used by the link flap tests. +""" +import time +import logging + +from tests.common.platform.device_utils import fanout_switch_port_lookup +from tests.common.utilities import wait_until +from tests.common.helpers.assertions import pytest_assert + +logger = logging.getLogger(__name__) + +def __get_dut_if_status(dut, ifname=None): + """ + Get interface status on the DUT. + + Args: + dut: DUT host object + ifname: Interface of DUT + exp_state: State of DUT's port ('up' or 'down') + verbose: Logging port state. + + Returns: + Interface state + """ + if not ifname: + status = dut.show_interface(command='status')['ansible_facts']['int_status'] + else: + status = dut.show_interface(command='status', interfaces=[ifname])['ansible_facts']['int_status'] + return status + + +def __check_if_status(dut, dut_port, exp_state, verbose=False): + """ + Check interface status on the DUT. + + Args: + dut: DUT host object + dut_port: Port of DUT + exp_state: State of DUT's port ('up' or 'down') + verbose: Logging port state. + + Returns: + Bool value which confirm port state + """ + status = __get_dut_if_status(dut, dut_port)[dut_port] + if verbose: + logger.debug("Interface status : %s", status) + return status['oper_state'] == exp_state + + +def __build_candidate_list(candidates, fanout, fanout_port, dut_port, status): + """ + Add candidates to list for link flap test. 
+ + Args: + candidates: List of tuple with DUT's port, + fanout port and fanout + fanout: Fanout host object + fanout_port: Port of fanout + dut_port: Port of DUT + completeness_level: Completeness level. + + Returns: + A list of tuple with DUT's port, fanout port + and fanout + """ + if not fanout or not fanout_port: + logger.info("Skipping port {} that is not found in connection graph".format(dut_port)) + elif status[dut_port]['admin_state'] == 'down': + logger.info("Skipping port {} that is admin down".format(dut_port)) + else: + candidates.append((dut_port, fanout, fanout_port)) + + +def build_test_candidates(dut, fanouthosts, port, completeness_level=None): + """ + Find test candidates for link flap test. + + Args: + dut: DUT host object + fanouthosts: List of fanout switch instances. + port: port + completeness_level: Completeness level. + + Returns: + A list of tuple with DUT's port, fanout port + and fanout + """ + candidates = [] + + if port != 'unknown': + status = __get_dut_if_status(dut, port) + fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut.hostname, port) + __build_candidate_list(candidates, fanout, fanout_port, port, status) + else: + # Build the full list + logger.warning("Failed to get ports enumerated as parameter. Fall back to test all ports") + status = __get_dut_if_status(dut) + + for dut_port in status.keys(): + fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut.hostname, dut_port) + __build_candidate_list(candidates, fanout, fanout_port, dut_port, status) + + if completeness_level == 'debug': + candidates = random.sample(candidates, 1) + + return candidates + + +def toggle_one_link(dut, dut_port, fanout, fanout_port, watch=False): + """ + Toggle one link on the fanout. 
+ + Args: + dut: DUT host object + dut_port: Port of DUT + fanout: Fanout host object + fanout_port: Port of fanout + watch: Logging system state + """ + logger.info("Testing link flap on %s", dut_port) + + pytest_assert(__check_if_status(dut, dut_port, 'up', verbose=True), "Fail: dut port {}: link operational down".format(dut_port)) + + logger.info("Shutting down fanout switch %s port %s connecting to %s", fanout.hostname, fanout_port, dut_port) + fanout.shutdown(fanout_port) + pytest_assert(wait_until(30, 1, __check_if_status, dut, dut_port, 'down', True), "dut port {} didn't go down as expected".format(dut_port)) + + if watch: + time.sleep(1) + watch_system_status(dut) + + logger.info("Bring up fanout switch %s port %s connecting to %s", fanout.hostname, fanout_port, dut_port) + fanout.no_shutdown(fanout_port) + pytest_assert(wait_until(30, 1, __check_if_status, dut, dut_port, 'up', True), "dut port {} didn't go up as expected".format(dut_port)) + + +def watch_system_status(dut): + """ + Watch DUT's system status + + Args: + dut: DUT host object + """ + # Watch memory status + memory_output = dut.shell("show system-memory")["stdout"] + logger.info("Memory Status: %s", memory_output) + + # Watch orchagent CPU utilization + orch_cpu = dut.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"] + logger.info("Orchagent CPU Util: %s", orch_cpu) + + # Watch Redis Memory + redis_memory = dut.shell("redis-cli info memory | grep used_memory_human")["stdout"] + logger.info("Redis Memory: %s", redis_memory) + + +def check_orch_cpu_utilization(dut, orch_cpu_threshold): + """ + Compare orchagent CPU utilization + + Args: + dut: DUT host object + orch_cpu_threshold: orch cpu threshold + """ + orch_cpu = dut.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"] + return int(float(orch_cpu)) < orch_cpu_threshold + + +def check_bgp_routes(dut, start_time_ip_route_counts, ipv4=False): + """ + Make Sure all ip routes are relearned with jitter 
of ~5 + + Args: + dut: DUT host object + start_time_ip_route_counts: IP route counts at start + ipv4: Version of IP + """ + if ipv4: + end_time_ip_route_counts = dut.shell("show ip route summary | grep Total | awk '{print $2}'")["stdout"] + logger.info("IPv4 routes at end: %s", end_time_ip_route_counts) + else: + end_time_ip_route_counts = dut.shell("show ipv6 route summary | grep Total | awk '{print $2}'")["stdout"] + logger.info("IPv6 routes at end: %s", end_time_ip_route_counts) + + incr_ip_route_counts = abs(int(float(start_time_ip_route_counts)) - int(float(end_time_ip_route_counts))) + return incr_ip_route_counts < 5 diff --git a/tests/platform_tests/link_flap/test_cont_link_flap.py b/tests/platform_tests/link_flap/test_cont_link_flap.py new file mode 100644 index 00000000000..8af3078c3af --- /dev/null +++ b/tests/platform_tests/link_flap/test_cont_link_flap.py @@ -0,0 +1,119 @@ +""" +Tests the continuous link flap in SONiC. + +Parameters: + --orch_cpu_threshold (int): Orchagent CPU utilization threshold the test + checks against. Default is 10. +""" + +import logging +import time +import pytest + +from tests.common.helpers.assertions import pytest_assert +from tests.common.port_toggle import port_toggle +from tests.platform_tests.link_flap.link_flap_utils import build_test_candidates, toggle_one_link, check_orch_cpu_utilization, check_bgp_routes +from tests.common.utilities import wait_until + + +pytestmark = [ + pytest.mark.disable_loganalyzer, + pytest.mark.topology('any') +] + +class TestContLinkFlap(object): + """ + TestContLinkFlap class for continuous link flap + """ + + def test_cont_link_flap(self, request, duthost, fanouthosts, bring_up_fanout_interfaces, bring_up_dut_interfaces): + """ + Validates that continuous link flap works as expected + + Test steps: + 1.) Flap all interfaces one by one in 1-3 iteration + to cause BGP Flaps. + 2.) Flap all interfaces on peer (FanOutLeaf) one by one 1-3 iteration + to cause BGP Flaps. + 3.)
Watch for memory (show system-memory) ,orchagent CPU Utilization + and Redis_memory. + + Pass Criteria: All routes must be re-learned with < 5% increase in Redis and + ORCH agent CPU consumption below threshold after 3 mins after stopping flaps. + """ + orch_cpu_threshold = request.config.getoption("--orch_cpu_threshold") + + # Record memory status at start + memory_output = duthost.shell("show system-memory")["stdout"] + logging.info("Memory Status at start: %s", memory_output) + + # Record Redis Memory at start + start_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"] + logging.info("Redis Memory: %s M", start_time_redis_memory) + + # Record ipv4 route counts at start + start_time_ipv4_route_counts = duthost.shell("show ip route summary | grep Total | awk '{print $2}'")["stdout"] + logging.info("IPv4 routes at start: %s", start_time_ipv4_route_counts) + + # Record ipv6 route counts at start + start_time_ipv6_route_counts = duthost.shell("show ipv6 route summary | grep Total | awk '{print $2}'")["stdout"] + logging.info("IPv6 routes at start %s", start_time_ipv6_route_counts) + + # Make Sure Orch CPU < orch_cpu_threshold before starting test. 
+ logging.info("Make Sure orchagent CPU utilization is less that %d before link flap", orch_cpu_threshold) + pytest_assert(wait_until(100, 2, check_orch_cpu_utilization, duthost, orch_cpu_threshold), + "Orch CPU utilization {} > orch cpu threshold {} before link flap" + .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold)) + + # Flap all interfaces one by one on DUT + for iteration in range(3): + logging.info("%d Iteration flap all interfaces one by one on DUT", iteration + 1) + port_toggle(duthost, watch=True) + + # Flap all interfaces one by one on Peer Device + for iteration in range(3): + logging.info("%d Iteration flap all interfaces one by one on Peer Device", iteration + 1) + candidates = build_test_candidates(duthost, fanouthosts, 'unknown') + + if not candidates: + pytest.skip("Didn't find any port that is admin up and present in the connection graph") + + for dut_port, fanout, fanout_port in candidates: + toggle_one_link(duthost, dut_port, fanout, fanout_port, watch=True) + + # Make Sure all ipv4 routes are relearned with jitter of ~5 + logging.info("IPv4 routes at start: %s", start_time_ipv4_route_counts) + pytest_assert(wait_until(60, 1, check_bgp_routes, duthost, start_time_ipv4_route_counts, True), "Ipv4 routes are not equal after link flap") + + # Make Sure all ipv6 routes are relearned with jitter of ~5 + logging.info("IPv6 routes at start: %s", start_time_ipv6_route_counts) + pytest_assert(wait_until(60, 1, check_bgp_routes, duthost, start_time_ipv6_route_counts), "Ipv6 routes are not equal after link flap") + + # Record memory status at end + memory_output = duthost.shell("show system-memory")["stdout"] + logging.info("Memory Status at end: %s", memory_output) + + # Record orchagent CPU utilization at end + orch_cpu = duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"] + logging.info("Orchagent CPU Util at end: %s", orch_cpu) + + # Record Redis Memory at end + 
end_time_redis_memory = duthost.shell("redis-cli info memory | grep used_memory_human | sed -e 's/.*:\(.*\)M/\\1/'")["stdout"] + logging.info("Redis Memory at start: %s M", start_time_redis_memory) + logging.info("Redis Memory at end: %s M", end_time_redis_memory) + + # Calculate diff in Redis memory + incr_redis_memory = float(end_time_redis_memory) - float(start_time_redis_memory) + logging.info("Redis absolute difference: %d", incr_redis_memory) + + # Check redis memory only if it is increased else default to pass + if incr_redis_memory > 0.0: + percent_incr_redis_memory = (incr_redis_memory / float(start_time_redis_memory)) * 100 + logging.info("Redis Memory percentage Increase: %d", percent_incr_redis_memory) + pytest_assert(percent_incr_redis_memory < 5, "Redis Memory Increase more than expected: {}".format(percent_incr_redis_memory)) + + # Orchagent CPU should consume < orch_cpu_threshold at last. + logging.info("watch orchagent CPU utilization when it goes below %d", orch_cpu_threshold) + pytest_assert(wait_until(45, 2, check_orch_cpu_utilization, duthost, orch_cpu_threshold), + "Orch CPU utilization {} > orch cpu threshold {} before link flap" + .format(duthost.shell("show processes cpu | grep orchagent | awk '{print $9}'")["stdout"], orch_cpu_threshold)) diff --git a/tests/platform_tests/link_flap/test_link_flap.py b/tests/platform_tests/link_flap/test_link_flap.py new file mode 100644 index 00000000000..b46f25aa863 --- /dev/null +++ b/tests/platform_tests/link_flap/test_link_flap.py @@ -0,0 +1,61 @@ +""" +Tests the link flap in SONiC. 
+""" +import logging + +import pytest +import random + +from tests.common.plugins.test_completeness import CompletenessLevel +from tests.platform_tests.link_flap.link_flap_utils import build_test_candidates, toggle_one_link +from tests.common.helpers.assertions import pytest_require +from tests.common.helpers.dut_ports import decode_dut_port_name + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.disable_loganalyzer, + pytest.mark.topology('any'), + pytest.mark.supported_completeness_level(CompletenessLevel.debug, CompletenessLevel.basic) +] + + +class TestLinkFlap(object): + """ + TestLinkFlap class for link flap + """ + def __init__(self, request): + """ + Initialization of parameters for test + + Args: + request: pytest request object + """ + self.completeness_level = CompletenessLevel.get_normalized_level(request) + + def run_link_flap_test(self, dut, fanouthosts, port): + """ + Test runner of link flap test. + + Args: + dut: DUT host object + fanouthosts: List of fanout switch instances. 
+ """ + candidates = build_test_candidates(dut, fanouthosts, port, self.completeness_level) + pytest_require(candidates, "Didn't find any port that is admin up and present in the connection graph") + + for dut_port, fanout, fanout_port in candidates: + toggle_one_link(dut, dut_port, fanout, fanout_port) + + +@pytest.mark.platform('physical') +def test_link_flap(request, duthosts, all_ports, fanouthosts, bring_up_fanout_interfaces): + """ + Validates that link flap works as expected + """ + tlf = TestLinkFlap(request) + + dutname, portname = decode_dut_port_name(all_ports) + for dut in duthosts: + if dutname == 'unknown' or dutname == dut.hostname: + tlf.run_link_flap_test(dut, fanouthosts, portname) diff --git a/tests/platform_tests/test_link_flap.py b/tests/platform_tests/test_link_flap.py deleted file mode 100644 index 5297c979b31..00000000000 --- a/tests/platform_tests/test_link_flap.py +++ /dev/null @@ -1,110 +0,0 @@ -import logging - -import pytest -import random - -from tests.common.platform.device_utils import fanout_switch_port_lookup -from tests.common.utilities import wait_until -from tests.common.plugins.test_completeness import CompletenessLevel -from tests.common.helpers.assertions import pytest_require -from tests.common.helpers.dut_ports import decode_dut_port_name - -logger = logging.getLogger(__name__) - -pytestmark = [ - pytest.mark.disable_loganalyzer, - pytest.mark.topology('any'), - pytest.mark.supported_completeness_level(CompletenessLevel.debug, CompletenessLevel.basic) -] - -class TestLinkFlap: - def __init__(self, request): - self.completeness_level = CompletenessLevel.get_normalized_level(request) - - def __get_dut_if_status(self, dut, ifname=None): - if not ifname: - status = dut.show_interface(command='status')['ansible_facts']['int_status'] - else: - status = dut.show_interface(command='status', interfaces=[ifname])['ansible_facts']['int_status'] - - return status - - - def __check_if_status(self, dut, dut_port, exp_state, 
verbose=False): - status = self.__get_dut_if_status(dut, dut_port)[dut_port] - if verbose: - logger.debug("Interface status : {}".format(status)) - return status['oper_state'] == exp_state - - - def __toggle_one_link(self, dut, dut_port, fanout, fanout_port): - logger.info("Testing link flap on {}".format(dut_port)) - - assert self.__check_if_status(dut, dut_port, 'up', verbose=True), "Fail: dut port {}: link operational down".format(dut_port) - - logger.info("Shutting down fanout switch {} port {} connecting to {}".format(fanout.hostname, fanout_port, dut_port)) - self.ports_shutdown_by_test.add((fanout, fanout_port)) - fanout.shutdown(fanout_port) - wait_until(30, 1, self.__check_if_status, dut, dut_port, 'down') - assert self.__check_if_status(dut, dut_port, 'down', verbose=True), "dut port {} didn't go down as expected".format(dut_port) - - logger.info("Bring up fanout switch {} port {} connecting to {}".format(fanout.hostname, fanout_port, dut_port)) - fanout.no_shutdown(fanout_port) - wait_until(30, 1, self.__check_if_status, dut, dut_port, 'up') - assert self.__check_if_status(dut, dut_port, 'up', verbose=True), "dut port {} didn't go down as expected".format(dut_port) - self.ports_shutdown_by_test.discard((fanout, fanout_port)) - - - def __build_candidate_list(self, candidates, fanout, fanout_port, dut_port, status): - if not fanout or not fanout_port: - logger.info("Skipping port {} that is not found in connection graph".format(dut_port)) - elif status[dut_port]['admin_state'] == 'down': - logger.info("Skipping port {} that is admin down".format(dut_port)) - else: - candidates.append((dut_port, fanout, fanout_port)) - - - def __build_test_candidates(self, dut, fanouthosts, port): - candidates = [] - if port != 'unknown': - status = self.__get_dut_if_status(dut, port) - fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut.hostname, port) - self.__build_candidate_list(candidates, fanout, fanout_port, port, status) - else: - # Build the full list 
- logger.warning("Failed to get ports enumerated as parameter. Fall back to test all ports") - status = self.__get_dut_if_status(dut) - - for dut_port in status.keys(): - fanout, fanout_port = fanout_switch_port_lookup(fanouthosts, dut.hostname, dut_port) - self.__build_candidate_list(candidates, fanout, fanout_port, dut_port, status) - - if self.completeness_level == 'debug': - candidates = random.sample(candidates, 1) - - return candidates - - - def run_link_flap_test(self, dut, fanouthosts, port): - self.ports_shutdown_by_test = set() - - candidates = self.__build_test_candidates(dut, fanouthosts, port) - pytest_require(candidates, "Didn't find any port that is admin up and present in the connection graph") - - try: - for dut_port, fanout, fanout_port in candidates: - self.__toggle_one_link(dut, dut_port, fanout, fanout_port) - finally: - logger.info("Restoring fanout switch ports that were shut down by test") - for fanout, fanout_port in self.ports_shutdown_by_test: - logger.debug("Restoring fanout switch {} port {} shut down by test".format(fanout.hostname, fanout_port)) - fanout.no_shutdown(fanout_port) - -@pytest.mark.platform('physical') -def test_link_flap(request, duthosts, all_ports, fanouthosts): - tlf = TestLinkFlap(request) - - dutname, portname = decode_dut_port_name(all_ports) - for dut in duthosts: - if dutname == 'unknown' or dutname == dut.hostname: - tlf.run_link_flap_test(dut, fanouthosts, portname) diff --git a/tests/platform_tests/test_port_toggle.py b/tests/platform_tests/test_port_toggle.py new file mode 100644 index 00000000000..d8683de1878 --- /dev/null +++ b/tests/platform_tests/test_port_toggle.py @@ -0,0 +1,30 @@ +""" +Tests the port toggle in SONiC. 
+""" + +import pytest + +from tests.common import port_toggle + + +pytestmark = [ + pytest.mark.topology("any") +] + + +class TestPortToggle(object): + """ + TestPortToggle class for testing port toggle + """ + + def test_port_toggle(self, duthost, bring_up_dut_interfaces): + """ + Validates that port toggle works as expected + + Test steps: + 1.) Flap all interfaces on DUT one by one. + 2.) Verify interfaces are up correctly. + + Pass Criteria: All interfaces are up correctly. + """ + port_toggle(duthost)