Skip to content

Commit 4e60a7d

Browse files
authored
refactor: optimize qos sai test (#16428)
Description of PR Optimize the qos/test_qos_sai.py test to reduce the running time. Summary: Fixes # (issue) Microsoft ADO 30056122 Type of change Bug fix Testbed and Framework(new/improvement) New Test case Skipped for non-supported platforms Add ownership here (Microsoft required only) Test case improvement Approach What is the motivation for this PR? The running time of the qos/test_qos_sai.py test is too long (~9h) on T2 chassis, so we wanted to reduce the running time. With this implementation, the running time will be reduced to ~7.5h. How did you do it? How did you verify/test it? I ran the updated code and can confirm it's working well on T2 chassis: Elastictest link. The 2 DWRR failures are expected, which will be fixed after #16199. I also ran a T1 regression test to confirm: Elastictest link Co-authored-by: [email protected]
1 parent 6feb630 commit 4e60a7d

File tree

1 file changed

+33
-15
lines changed

1 file changed

+33
-15
lines changed

tests/qos/qos_sai_base.py

Lines changed: 33 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
from tests.common.fixtures.ptfhost_utils import ptf_portmap_file # noqa F401
1616
from tests.common.helpers.assertions import pytest_assert, pytest_require
17+
from tests.common.helpers.multi_thread_utils import SafeThreadPoolExecutor
1718
from tests.common.mellanox_data import is_mellanox_device as isMellanoxDevice
1819
from tests.common.cisco_data import is_cisco_device
1920
from tests.common.dualtor.dual_tor_utils import upper_tor_host, lower_tor_host, dualtor_ports, is_tunnel_qos_remap_enabled # noqa F401
@@ -590,13 +591,18 @@ def swapSyncd_on_selected_duts(self, request, duthosts, creds, tbinfo, lower_tor
590591
new_creds['docker_registry_password'] = ''
591592
else:
592593
new_creds = creds
593-
for duthost in dut_list:
594-
docker.swap_syncd(duthost, new_creds)
594+
595+
with SafeThreadPoolExecutor(max_workers=8) as executor:
596+
for duthost in dut_list:
597+
executor.submit(docker.swap_syncd, duthost, new_creds)
598+
595599
yield
600+
596601
finally:
597602
if swapSyncd:
598-
for duthost in dut_list:
599-
docker.restore_default_syncd(duthost, new_creds)
603+
with SafeThreadPoolExecutor(max_workers=8) as executor:
604+
for duthost in dut_list:
605+
executor.submit(docker.restore_default_syncd, duthost, new_creds)
600606

601607
@pytest.fixture(scope='class', name="select_src_dst_dut_and_asic",
602608
params=["single_asic", "single_dut_multi_asic",
@@ -1414,23 +1420,31 @@ def updateDockerService(host, docker="", action="", service=""): # noqa: F811
14141420
upper_tor_host, testcase="test_qos_sai", feature_list=feature_list)
14151421

14161422
disable_container_autorestart(src_dut, testcase="test_qos_sai", feature_list=feature_list)
1417-
for service in src_services:
1418-
updateDockerService(src_dut, action="stop", **service)
1423+
with SafeThreadPoolExecutor(max_workers=8) as executor:
1424+
for service in src_services:
1425+
executor.submit(updateDockerService, src_dut, action="stop", **service)
1426+
14191427
src_dut.shell("sudo config bgp shutdown all")
14201428
if src_asic != dst_asic:
14211429
disable_container_autorestart(dst_dut, testcase="test_qos_sai", feature_list=feature_list)
1422-
for service in dst_services:
1423-
updateDockerService(dst_dut, action="stop", **service)
1430+
with SafeThreadPoolExecutor(max_workers=8) as executor:
1431+
for service in dst_services:
1432+
executor.submit(updateDockerService, dst_dut, action="stop", **service)
1433+
14241434
dst_dut.shell("sudo config bgp shutdown all")
14251435

14261436
yield
14271437

1428-
for service in src_services:
1429-
updateDockerService(src_dut, action="start", **service)
1438+
with SafeThreadPoolExecutor(max_workers=8) as executor:
1439+
for service in src_services:
1440+
executor.submit(updateDockerService, src_dut, action="start", **service)
1441+
14301442
src_dut.shell("sudo config bgp start all")
14311443
if src_asic != dst_asic:
1432-
for service in dst_services:
1433-
updateDockerService(dst_dut, action="start", **service)
1444+
with SafeThreadPoolExecutor(max_workers=8) as executor:
1445+
for service in dst_services:
1446+
executor.submit(updateDockerService, dst_dut, action="start", **service)
1447+
14341448
dst_dut.shell("sudo config bgp start all")
14351449

14361450
""" Start mux conatiner for dual ToR """
@@ -1909,9 +1923,13 @@ def dut_disable_ipv6(self, duthosts, tbinfo, lower_tor_host, swapSyncd_on_select
19091923
logger.info("Adding docker0's IPv6 address since it was removed when disabing IPv6")
19101924
duthost.shell("ip -6 addr add {} dev docker0".format(all_docker0_ipv6_addrs[duthost.hostname]))
19111925

1912-
# TODO: parallelize this step.. Do we really need this ?
1913-
for duthost in dut_list:
1914-
config_reload(duthost, config_source='config_db', safe_reload=True, check_intf_up_ports=True)
1926+
# TODO: Do we really need this ?
1927+
with SafeThreadPoolExecutor(max_workers=8) as executor:
1928+
for duthost in dut_list:
1929+
executor.submit(
1930+
config_reload,
1931+
duthost, config_source='config_db', safe_reload=True, check_intf_up_ports=True,
1932+
)
19151933

19161934
@pytest.fixture(scope='class', autouse=True)
19171935
def sharedHeadroomPoolSize(

0 commit comments

Comments
 (0)