Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 36 additions & 6 deletions tests/bgp/bgp_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -876,12 +876,31 @@ def fetch_and_delete_pcap_file(bgp_pcap, log_dir, duthost, request):
return local_pcap_filename


def is_chassis_system(duthost):
    """
    Check whether the DUT belongs to a chassis system.

    Runs sonic_py_common.device_info.is_chassis() on the DUT via a shell
    one-liner and parses the printed result. Any failure — a raised error
    from the shell transport or a non-zero return code — is treated as
    "not a chassis", so callers always get a plain boolean.
    """
    probe_cmd = (
        'python3 -c "from sonic_py_common import device_info; '
        'print(device_info.is_chassis())"'
    )
    try:
        res = duthost.shell(probe_cmd, module_ignore_errors=True)
        if res.get('rc', 1) != 0:
            # Command failed on the DUT; best effort says non-chassis.
            return False
        return res.get('stdout', '').strip().lower() == 'true'
    except Exception:
        # Transport/shell failure — degrade gracefully to non-chassis.
        return False


def get_tsa_chassisdb_config(duthost):
    """
    @summary: Return the DUT's BGP_DEVICE_GLOBAL|STATE tsa_enabled value.

    On a chassis supervisor the global TSA state is kept in CHASSIS_APP_DB;
    on linecards and non-chassis systems it lives in the local CONFIG_DB.
    Returns the raw sonic-db-cli output string (e.g. 'true' or 'false').
    """
    # NOTE: the old unconditional CHASSIS_APP_DB read-and-return was removed;
    # it made the chassis-aware branching below unreachable dead code.
    if is_chassis_system(duthost):
        # Supervisor node on a chassis system: CHASSIS_APP_DB holds the global TSA state
        return duthost.shell(
            "sonic-db-cli CHASSIS_APP_DB HGET 'BGP_DEVICE_GLOBAL|STATE' tsa_enabled"
        )["stdout"]
    # Linecards and non-chassis systems: read from local CONFIG_DB
    return duthost.shell(
        "sonic-db-cli CONFIG_DB HGET 'BGP_DEVICE_GLOBAL|STATE' tsa_enabled"
    )["stdout"]


def get_sup_cfggen_tsa_value(suphost):
Expand All @@ -898,11 +917,22 @@ def verify_dut_configdb_tsa_value(duthost):
"""
tsa_config = list()
tsa_enabled = False
for asic_index in duthost.get_frontend_asic_ids():
prefix = "-n asic{}".format(asic_index) if asic_index != DEFAULT_ASIC_ID else ''
output = duthost.shell('sonic-db-cli {} CONFIG_DB HGET \'BGP_DEVICE_GLOBAL|STATE\' \'tsa_enabled\''.
format(prefix))['stdout']

if is_chassis_system(duthost):
# On chassis supervisor, check CHASSIS_APP_DB
output = duthost.shell(
"sonic-db-cli CHASSIS_APP_DB HGET 'BGP_DEVICE_GLOBAL|STATE' 'tsa_enabled'"
)["stdout"]
tsa_config.append(output)
else:
# On linecards and non-chassis systems, check CONFIG_DB for each ASIC
for asic_index in duthost.get_frontend_asic_ids():
prefix = "-n asic{}".format(asic_index) if asic_index != DEFAULT_ASIC_ID else ''
output = duthost.shell(
"sonic-db-cli {} CONFIG_DB HGET 'BGP_DEVICE_GLOBAL|STATE' 'tsa_enabled'".format(prefix)
)["stdout"]
tsa_config.append(output)

if 'true' in tsa_config:
tsa_enabled = True

Expand Down
26 changes: 16 additions & 10 deletions tests/bgp/reliable_tsa/test_reliable_tsa_flaky.py
Original file line number Diff line number Diff line change
Expand Up @@ -316,17 +316,19 @@ def verify_line_card_after_sup_tsa(lc):
executor.submit(verify_line_card_after_sup_tsa, linecard)

# Verify dut config_reload scenario for one of the line card to make sure tsa config is in sync
first_linecard = duthosts.frontend_nodes[0]
first_linecard.shell('sudo config save -y')
config_reload(first_linecard, safe_reload=True, check_intf_up_ports=True)
# Skip this test for non-chassis systems (no linecards)
if duthosts.frontend_nodes:
first_linecard = duthosts.frontend_nodes[0]
first_linecard.shell('sudo config save -y')
config_reload(first_linecard, safe_reload=True, check_intf_up_ports=True)

# Verify DUT is in maintenance state.
pytest_assert(TS_MAINTENANCE == get_traffic_shift_state(first_linecard, cmd='TSC no-stats'),
"DUT is not in maintenance state after config reload")
assert_only_loopback_routes_announced_to_neighs(duthosts, first_linecard,
dut_nbrhosts[first_linecard],
traffic_shift_community,
"Failed to verify routes on nbr in TSA")
# Verify DUT is in maintenance state.
pytest_assert(TS_MAINTENANCE == get_traffic_shift_state(first_linecard, cmd='TSC no-stats'),
"DUT is not in maintenance state after config reload")
assert_only_loopback_routes_announced_to_neighs(duthosts, first_linecard,
dut_nbrhosts[first_linecard],
traffic_shift_community,
"Failed to verify routes on nbr in TSA")

# Verify supervisor still has tsa_enabled 'true' config
pytest_assert('true' == get_tsa_chassisdb_config(suphost),
Expand Down Expand Up @@ -682,6 +684,10 @@ def test_sup_tsb_when_startup_tsa_tsb_service_running(duthosts, localhost, enum_
orig_v6_routes[linecard] = parse_routes_on_neighbors(linecard, dut_nbrhosts[linecard], 6)

# Verify dut reboot scenario for one of the line card to make sure tsa config is in sync
# Skip this test for non-chassis systems (no linecards)
if not duthosts.frontend_nodes:
pytest.skip("No linecards to test on")

first_linecard = duthosts.frontend_nodes[0]
logger.info("Cold reboot on node: %s", first_linecard.hostname)
reboot(first_linecard, localhost, wait=240)
Expand Down
24 changes: 13 additions & 11 deletions tests/common/devices/duthosts.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,13 +100,7 @@ def __initialize_nodes_for_parallel(self):
self._nodes_for_parallel_initial_checks if self.is_parallel_leader else self._nodes_for_parallel_tests
)

self._supervisor_nodes = self._Nodes([
node for node in self._nodes_for_parallel if node.is_supervisor_node()
])

self._frontend_nodes = self._Nodes([
node for node in self._nodes_for_parallel if node.is_frontend_node()
])
self.__assign_groups(self._nodes_for_parallel)

def __initialize_nodes(self):
self._nodes = self._Nodes([
Expand All @@ -118,8 +112,7 @@ def __initialize_nodes(self):
) for hostname in self.tbinfo["duts"] if hostname in self.duts
])

self._supervisor_nodes = self._Nodes([node for node in self._nodes if node.is_supervisor_node()])
self._frontend_nodes = self._Nodes([node for node in self._nodes if node.is_frontend_node()])
self.__assign_groups(self._nodes)

def __should_reinit_when_parallel(self):
return (
Expand All @@ -137,8 +130,17 @@ def __reinit_nodes_for_parallel(self):
self.parallel_run_stage = NON_INITIAL_CHECKS_STAGE
self._nodes_for_parallel = self._nodes_for_parallel_tests

self._supervisor_nodes = self._Nodes([node for node in self._nodes_for_parallel if node.is_supervisor_node()])
self._frontend_nodes = self._Nodes([node for node in self._nodes_for_parallel if node.is_frontend_node()])
self.__assign_groups(self._nodes_for_parallel)

def __assign_groups(self, nodes):
    """Populate the supervisor and frontend node groups from *nodes*.

    Supervisor nodes are always collected. The frontend group is only
    populated when at least one supervisor is present (a chassis setup);
    with no supervisors (non-chassis), frontend_nodes stays empty.
    """
    self._supervisor_nodes = self._Nodes(
        [n for n in nodes if n.is_supervisor_node()]
    )
    has_supervisor = any(n.is_supervisor_node() for n in nodes)
    self._frontend_nodes = self._Nodes(
        [n for n in nodes if n.is_frontend_node()] if has_supervisor else []
    )

@property
def nodes(self):
Expand Down
Loading