Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -35,10 +35,6 @@ Hosting pytest test infrastructure and test cases
* execute link flap test case.
* when specifying a test case list, no need to specify topology with -t.

### Run scripts under a folder ###
* ./run_tests.sh -d <dut_name> -n <testbed_name> -u -c "snmp/test_*.py" -s "snmp/test_snmp_cpu.py"
* execute all the scripts under `snmp`, skip script `snmp/test_snmp_cpu.py`.
* the test scripts pattern **MUST** be double quoted, otherwise the wrong set of scripts may be run.

### Run a list of test cases ###
* ./run_tests.sh -d <dut_name> -n <testbed_name> [-s <list of test cases or files to skip>] -u -c "platform_tests/test_link_flap.py platform_tests/test_reboot.py::test_cold_reboot"
Expand Down
38 changes: 14 additions & 24 deletions tests/run_tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -89,27 +89,6 @@ function setup_environment()

function setup_test_options()
{
# If a test script is explicitly specified in pytest command line, then use `--ignore` to ignore it will not work
# Below logic is to ensure that SKIP_FOLDERS and SKIP_SCRIPTS take precedence over the specified TEST_CASES.
# If a test script is in both ${TEST_CASES} and ${SKIP_SCRIPTS}, the script will not be executed. This design is
# for the scenario of specifying test scripts using pattern like `subfolder/test_*.py`. The pattern will be
# expanded to matched test scripts by bash. Among the expanded scripts, we may want to skip a few. Then we can
# explicitly specify the script to be skipped.
SKIP_SCRIPTS="${SKIP_SCRIPTS} test_announce_routes.py test_nbr_health.py"
ignores=$(python -c "print '|'.join('''$SKIP_FOLDERS'''.split())")
if [[ -z ${TEST_CASES} ]]; then
# When TEST_CASES is not specified, find all the possible scripts, ignore the scripts under $SKIP_FOLDERS
all_scripts=$(find ./ -name 'test_*.py' | sed s:^./:: | grep -vE "^(${ignores})")
else
# When TEST_CASES is specified, ignore the scripts under $SKIP_FOLDERS
all_scripts=""
for test_script in ${TEST_CASES}; do
all_scripts="${all_scripts} $(echo ${test_script} | sed s:^./:: | grep -vE "^(${ignores})")"
done
fi
# Ignore the scripts specified in $SKIP_SCRIPTS
TEST_CASES=$(python -c "print '\n'.join(set('''$all_scripts'''.split()) - set('''$SKIP_SCRIPTS'''.split()))" | sort)

PYTEST_COMMON_OPTS="--inventory ${INVENTORY} \
--host-pattern ${DUT_NAME} \
--testbed ${TESTBED_NAME} \
Expand Down Expand Up @@ -210,15 +189,26 @@ function cleanup_dut()
# Run all selected test scripts in a single pytest session ("group" mode).
# Globals (read): TEST_CASES, PYTEST_COMMON_OPTS, TEST_LOGGING_OPTIONS,
#                 TEST_TOPOLOGY_OPTIONS, EXTRA_PARAMETERS
# Outputs: pytest output on stdout/stderr.
# Returns: exit status of the pytest invocation.
function run_group_tests()
{
    echo "=== Running tests in groups ==="
    # Options come first, test script paths last, so that user-supplied
    # EXTRA_PARAMETERS cannot be misinterpreted as belonging to a test path.
    # NOTE: variables are intentionally unquoted here — TEST_CASES and the
    # *_OPTS variables hold space-separated lists that must word-split into
    # separate pytest arguments.
    pytest ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS} ${TEST_CASES}
}

function run_individual_tests()
{
if [[ -n ${TEST_CASES} ]] ;then
test_scripts=${TEST_CASES}
else
SKIP_SCRIPTS="${SKIP_SCRIPTS} test_announce_routes.py test_nbr_health.py"

ignores=$(python -c "print '|'.join('''$SKIP_FOLDERS'''.split())")

all_scripts=$(find ./ -name 'test_*.py' | sed s:^./:: | grep -vE "^(${SKIP_FOLDERS})")
test_scripts=$(python -c "print '\n'.join(set('''$all_scripts'''.split()) - set('''$SKIP_SCRIPTS'''.split()))" | sort)
fi

EXIT_CODE=0

echo "=== Running tests individually ==="
for test_script in ${TEST_CASES}; do
for test_script in ${test_scripts}; do
if [[ x"${OMIT_FILE_LOG}" != x"True" ]]; then
test_dir=$(dirname ${test_script})
script_name=$(basename ${test_script})
Expand All @@ -229,7 +219,7 @@ function run_individual_tests()
TEST_LOGGING_OPTIONS="--log-file ${LOG_PATH}/${test_dir}/${test_name}.log --junitxml=${LOG_PATH}/${test_dir}/${test_name}.xml"
fi

pytest ${test_script} ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${EXTRA_PARAMETERS}
pytest ${PYTEST_COMMON_OPTS} ${TEST_LOGGING_OPTIONS} ${TEST_TOPOLOGY_OPTIONS} ${test_script} ${EXTRA_PARAMETERS}
ret_code=$?

# If test passed, no need to keep its log.
Expand Down