Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 15 additions & 10 deletions tests/common/plugins/test_completeness/README.md
Original file line number Diff line number Diff line change
@@ -1,22 +1,27 @@
### CompletenessLevel markers
CompletenessLevel marker enables testcases to be executed in different meaningful levels.
Each level is a representation of a scope of execution of a testcase. This document describes the usage of CompletenessLevel marker.
Identified meaningful levels (in increasing order) -
Defined levels (in increasing order) -

Debug
debug

Basic
basic

Confident
confident

Thorough
thorough

An unordered level `diagnose` is also supported:
`diagnose` is meant to analyze an existing defect or a feature. This level is different from the ordered levels that are primarily meant to validate SONiC.
Diagnose encompasses a special set of test scenarios that are known to fail because of an existing image issue. This level is intended for a manual test targeted at executing unhealthy scenarios only as defined in the test. If `diagnose` level is specified and a test doesn’t support it, then it shall fall back to `basic` mode.

### To use CompletenessLevel:
- Mark the testcase with marker ```supported_completeness_level```. This marker is a list of all the completeness levels supported by a testcase.
- During Pytest execution, use command line option ```--completeness_level``` to specify the test completeness level.
- Automatic normalization between specified ```--completeness_level``` and defined ```supported_completeness_level``` will be performed and the test will be executed at the resultant normalized level of completeness.
- If the module/session/testcase have different supported levels of completeness, the innermost level will supersede any outer defined level.
For example, if the module and testcase have supported levels "debug, basic, thorough" and "confident" respectively, the resultant defined level for this testcase will be "confident".
- Within a testcase - Class method `CompletenessLevel.get_normalized_level()` can be called with the test's `request` object to get the normalized level.

### Different cases for CompletenessLevel

Expand All @@ -37,20 +42,20 @@ To handle any discrepancy between specified and defined completeness levels, nor
import pytest
from tests.common.plugins.test_completeness import CompletenessLevel

pytestmark = [pytest.mark.supported_completeness_level(CompletenessLevel.Debug, CompletenessLevel.Thorough)]
pytestmark = [pytest.mark.supported_completeness_level(CompletenessLevel.debug, CompletenessLevel.thorough)]

def test_test_completeness_default(request):
normalized_level = [mark.args for mark in request.node.iter_markers(name="supported_completeness_level")]
normalized_level = CompletenessLevel.get_normalized_level(request)
logger.info("Completeness level set to: {}".format(str(normalized_level)))

## Continue execution of the testcase up to the specified completeness level.
# Debug - Do something - end the test if the specified level is Debug
# debug - Do something - end the test if the specified level is debug
...
...
# Basic - Do something more - extra tests/verifications - end the test now if the level is Basic
# basic - Do something more - extra tests/verifications - end the test now if the level is basic
...
...
# Thorough - Run entire test - if the set level is Thorough
# thorough - Run entire test - if the set level is thorough
```

### CompletenessLevel execution snippets
Expand Down
50 changes: 46 additions & 4 deletions tests/common/plugins/test_completeness/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,41 @@ class CompletenessLevel(enum.IntEnum):
basic = 1
confident = 2
thorough = 3 # Maximum execution
diagnose = 100 # diagnose is an unordered level. INT value 100 is assigned for simplicity reasons

@classmethod
def get_normalized_level(cls, request):
"""Get the normalized completeness level for a given test instance.

For example, if a testcase supports "CompletenessLevel.basic, CompletenessLevel.thorough", and the specified level
during test execution is "confident", then this method will normalize the level to "basic".

Returns:
CompletenessLevel as a string
"""
all_supported_levels = [mark.args for mark in request.node.iter_markers(name="supported_completeness_level")]
logging.info("All supported completeness levels of the test: {}".format(str(all_supported_levels)))
normalized_level = all_supported_levels[0][0]
normalized_level_name = CompletenessLevel.get_level_name(normalized_level)
logging.info("Normalized completeness level set to: {}".format(normalized_level_name))
return normalized_level_name

@classmethod
def get_level_name(cls, level):
"""Converts a type CompletenessLevel to type str.

For example, if input is CompletenessLevel.basic, this method will return "basic."

Arguments:
level - An enum value of type CompletenessLevel

Returns:
CompletenessLevel as a string
"""
if type(level) is not CompletenessLevel:
logging.error("Invalid completeness type. Expected: {}. Format {}".format(str(CompletenessLevel), type(level)))
level_name = level.name.lower()
return level_name

def set_default(specified_level):
if not specified_level: # Case 1
Expand All @@ -17,16 +52,22 @@ def set_default(specified_level):
specified_level = specified_level.lower()
if specified_level not in CompletenessLevel._member_names_:
specified_level = CompletenessLevel.basic
warnings.warn("Unidentified completeness level specified. Specified: {}. Allowed: {}".format(str(CompletenessLevel(specified_level)), \
str(CompletenessLevel._member_names_)))
warnings.warn("Unidentified completeness level specified. Specified: {}. Allowed: {}"\
.format(str(CompletenessLevel(specified_level)), str(CompletenessLevel._member_names_)))
logging.info("Unidentified completeness level specified. Setting to default level: {}".format(CompletenessLevel.basic))
else:
specified_level = CompletenessLevel[specified_level]

return specified_level

def normalize_levels(specified_level, defined_levels):
logging.info("Setting test completeness level. Specified: {}. Defined: {}".format(str(CompletenessLevel(specified_level)), str(defined_levels)))
logging.info("Setting test completeness level. Specified: {}. Defined: {}".\
format(str(CompletenessLevel(specified_level)), str(defined_levels)))

if specified_level not in defined_levels:
# if specified_level is diagnose and the testcase does not support it, default level is basic.
if specified_level == CompletenessLevel.diagnose:
specified_level = CompletenessLevel.basic

if specified_level not in defined_levels:
if specified_level > max(defined_levels): # Case 3.1
Expand All @@ -40,7 +81,8 @@ def normalize_levels(specified_level, defined_levels):
if level <= specified_level and lesser_defined_level_dist > (specified_level - level):
completeness_level = level
lesser_defined_level_dist = lesser_defined_level_dist - level
logging.info("Specified level ({}) not found in defined levels. Setting level to {}".format(str(CompletenessLevel(specified_level)), str(completeness_level)))
logging.info("Specified level ({}) not found in defined levels. Setting level to {}".\
format(str(CompletenessLevel(specified_level)), str(completeness_level)))
else: # Case 4
completeness_level = specified_level
logging.info("Setting the completeness level to {}".format(str(CompletenessLevel(completeness_level))))
Expand Down
17 changes: 9 additions & 8 deletions tests/platform_tests/test_link_flap.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,9 @@
]

class TestLinkFlap:
def __init__(self, request):
self.completeness_level = CompletenessLevel.get_normalized_level(request)

def __get_dut_if_status(self, dut, ifname=None):
if not ifname:
status = dut.show_interface(command='status')['ansible_facts']['int_status']
Expand Down Expand Up @@ -47,7 +50,7 @@ def __toggle_one_link(self, dut, dut_port, fanout, fanout_port):
self.ports_shutdown_by_test.discard((fanout, fanout_port))


def __build_test_candidates(self, dut, fanouthosts, completeness_level):
def __build_test_candidates(self, dut, fanouthosts):
status = self.__get_dut_if_status(dut)
candidates = []

Expand All @@ -60,17 +63,17 @@ def __build_test_candidates(self, dut, fanouthosts, completeness_level):
logging.info("Skipping port {} that is admin down".format(dut_port))
else:
candidates.append((dut_port, fanout, fanout_port))
if CompletenessLevel.debug in completeness_level:
if self.completeness_level == 'debug':
# Run the test for one port only - to just test if the test works fine
return candidates

return candidates


def run_link_flap_test(self, dut, fanouthosts, completeness_level):
def run_link_flap_test(self, dut, fanouthosts):
self.ports_shutdown_by_test = set()

candidates = self.__build_test_candidates(dut, fanouthosts, completeness_level)
candidates = self.__build_test_candidates(dut, fanouthosts)
if not candidates:
pytest.skip("Didn't find any port that is admin up and present in the connection graph")

Expand All @@ -85,7 +88,5 @@ def run_link_flap_test(self, dut, fanouthosts, completeness_level):

@pytest.mark.platform('physical')
def test_link_flap(request, duthost, fanouthosts):
normalized_level = [mark.args for mark in request.node.iter_markers(name="supported_completeness_level")][0]
logging.info("Completeness level set: {}".format(str(normalized_level)))
tlf = TestLinkFlap()
tlf.run_link_flap_test(duthost, fanouthosts, normalized_level)
tlf = TestLinkFlap(request)
tlf.run_link_flap_test(duthost, fanouthosts)