diff --git a/ansible/library/show_interface.py b/ansible/library/show_interface.py index c0a2cc7902e..b01f8e3d6a9 100644 --- a/ansible/library/show_interface.py +++ b/ansible/library/show_interface.py @@ -117,8 +117,9 @@ def _fetch_interface_type(self, line): return ' '.join(line.split()[9:-1]) def collect_interface_status(self, namespace=None, include_internal_intfs=False, include_inband_intfs=False): + # W/A until Issue https://github.com/sonic-net/sonic-utilities/issues/3444 is fixed regex_int_fec = re.compile( - r'(\S+)\s+[\d,N\/A]+\s+(\w+)\s+(\d+)\s+(rs|fc|N\/A|none)\s+([\w\/]+)\s+(\w+)\s+(\w+)\s+(\w+)') + r'(\S+)\s+[\d,N\/A]+\s+(\w+)\s+(\d+)\s+(rs|fc|N\/A|none)\s+([\w\/]+)\s+(\w+)\s+([\w\/]+)\s+(\w+)') regex_int = re.compile( r'(\S+)\s+[\d,N\/A]+\s+(\w+)\s+(\d+)\s+([\w\/]+)\s+(\w+)\s+(\w+)\s+(\w+)') regex_int_internal = re.compile( diff --git a/tests/common/gu_utils.py b/tests/common/gu_utils.py index 114d2e9a8e9..65d6dc23e5e 100644 --- a/tests/common/gu_utils.py +++ b/tests/common/gu_utils.py @@ -39,7 +39,9 @@ def apply_patch(duthost, json_data, dest_file): json_data: Source json patch to apply dest_file: Destination file on duthost """ - duthost.copy(content=json.dumps(json_data, indent=4), dest=dest_file) + patch_content = json.dumps(json_data, indent=4) + duthost.copy(content=patch_content, dest=dest_file) + logger.debug("Patch Content: {}".format(patch_content)) cmds = 'config apply-patch {}'.format(dest_file) @@ -470,3 +472,58 @@ def expect_acl_rule_removed(duthost, rulename, setup): removed = len(output) == 0 pytest_assert(removed, "'{}' showed a rule, this following rule should have been removed".format(cmds)) + + +def save_backup_test_config(duthost, file_postfix="bkp"): + """Save test env before a test case starts. + + Back up the existing config_db.json file(s). + + Args: + duthost: Device Under Test (DUT) + file_postfix: Postfix string to be used for the backup files. + + Returns: + None. 
+ """ + CONFIG_DB = "/etc/sonic/config_db.json" + CONFIG_DB_BACKUP = "/etc/sonic/config_db.json.{}".format(file_postfix) + + logger.info("Backup {} to {} on {}".format( + CONFIG_DB, CONFIG_DB_BACKUP, duthost.hostname)) + duthost.shell("cp {} {}".format(CONFIG_DB, CONFIG_DB_BACKUP)) + if duthost.is_multi_asic: + for n in range(len(duthost.asics)): + asic_config_db = "/etc/sonic/config_db{}.json".format(n) + asic_config_db_backup = "/etc/sonic/config_db{}.json.{}".format(n, file_postfix) + logger.info("Backup {} to {} on {}".format( + asic_config_db, asic_config_db_backup, duthost.hostname)) + duthost.shell("cp {} {}".format(asic_config_db, asic_config_db_backup)) + + +def restore_backup_test_config(duthost, file_postfix="bkp", config_reload=True): + """Restore test env after a test case finishes. + + Args: + duthost: Device Under Test (DUT) + file_postfix: Postfix string to be used for restoring the saved backup files. + + Returns: + None. + """ + CONFIG_DB = "/etc/sonic/config_db.json" + CONFIG_DB_BACKUP = "/etc/sonic/config_db.json.{}".format(file_postfix) + + logger.info("Restore {} with {} on {}".format( + CONFIG_DB, CONFIG_DB_BACKUP, duthost.hostname)) + duthost.shell("mv {} {}".format(CONFIG_DB_BACKUP, CONFIG_DB)) + if duthost.is_multi_asic: + for n in range(len(duthost.asics)): + asic_config_db = "/etc/sonic/config_db{}.json".format(n) + asic_config_db_backup = "/etc/sonic/config_db{}.json.{}".format(n, file_postfix) + logger.info("Restore {} with {} on {}".format( + asic_config_db, asic_config_db_backup, duthost.hostname)) + duthost.shell("mv {} {}".format(asic_config_db_backup, asic_config_db)) + + if config_reload: + config_reload(duthost) diff --git a/tests/common/helpers/dut_utils.py b/tests/common/helpers/dut_utils.py index e7307c7c4dc..093900418c5 100644 --- a/tests/common/helpers/dut_utils.py +++ b/tests/common/helpers/dut_utils.py @@ -295,18 +295,23 @@ def verify_features_state(duthost): return True -def verify_orchagent_running_or_assert(duthost): 
+        asic_ids = duthost.get_asic_ids() if asic_id is None else [asic_id]
+        for asic in asic_ids:
+            cmd = 'docker exec swss{} supervisorctl status orchagent'.format('' if asic is None else asic)
+            output = duthost.shell(cmd, module_ignore_errors=True)
+            pytest_assert(not output['rc'], "Unable to check orchagent status output for asic_id {}".format(asic))
+            if 'RUNNING' not in output['stdout']:
+                break
+    Return a (asic_namespace, asic_index) tuple; both are None for a single-asic duthost.
+    asic_namespace, _ = rand_asic_namespace
config: {}" @@ -73,9 +73,7 @@ def reset_and_restore_test_environment(duthosts, rand_one_dut_hostname): yield - logger.info("Restore {} with {} on {}".format( - CONFIG_DB, CONFIG_DB_BACKUP, duthost.hostname)) - duthost.shell("mv {} {}".format(CONFIG_DB_BACKUP, CONFIG_DB)) + restore_backup_test_config(duthost, file_postfix="before_gcu_test", config_reload=False) if output['rc'] or "Patch applied successfully" not in output['stdout']: logger.info("Restore Config after GCU test.") diff --git a/tests/generic_config_updater/test_aaa.py b/tests/generic_config_updater/test_aaa.py index 52bdaeffa57..61976745bfb 100644 --- a/tests/generic_config_updater/test_aaa.py +++ b/tests/generic_config_updater/test_aaa.py @@ -160,11 +160,11 @@ def aaa_tc1_add_config(duthost): "login": "tacacs+,local" } } - + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/AAA", + "path": "{}/AAA".format(json_namespace), "value": aaa_config } ] @@ -189,20 +189,21 @@ def aaa_tc1_add_config(duthost): def aaa_tc1_replace(duthost): """ Test replace option value in each AAA sub type """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "replace", - "path": "/AAA/authorization/login", + "path": "{}/AAA/authorization/login".format(json_namespace), "value": "tacacs+" }, { "op": "replace", - "path": "/AAA/authentication/login", + "path": "{}/AAA/authentication/login".format(json_namespace), "value": "tacacs+" }, { "op": "replace", - "path": "/AAA/accounting/login", + "path": "{}/AAA/accounting/login".format(json_namespace), "value": "tacacs+" } ] @@ -226,20 +227,21 @@ def aaa_tc1_replace(duthost): def aaa_tc1_add_duplicate(duthost): """ Test add duplicate config in AAA sub type """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/AAA/authorization/login", + "path": "{}/AAA/authorization/login".format(json_namespace), "value": "tacacs+" }, { "op": "add", - 
"path": "/AAA/authentication/login", + "path": "{}/AAA/authentication/login".format(json_namespace), "value": "tacacs+" }, { "op": "add", - "path": "/AAA/accounting/login", + "path": "{}/AAA/accounting/login".format(json_namespace), "value": "tacacs+" } ] @@ -263,10 +265,11 @@ def aaa_tc1_add_duplicate(duthost): def aaa_tc1_remove(duthost): """ Test remove AAA config check if it returns to default setup """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/AAA" + "path": "{}/AAA".format(json_namespace) } ] @@ -291,6 +294,7 @@ def test_tc1_aaa_suite(rand_selected_dut): """ This test is for default setting when configDB doesn't contian AAA table. So we remove AAA config at first. """ + aaa_add_init_config_without_table(rand_selected_dut) # Recent AAA YANG update that passkey in TACPLUS must exist first for authorization tacacs+ # Since tc2 it will clean and retest TACPLUS table, we don't care TACPLUS residue after tc1 @@ -304,10 +308,11 @@ def test_tc1_aaa_suite(rand_selected_dut): def tacacs_global_tc2_add_config(duthost): """ Test add tacacs global config """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/TACPLUS", + "path": "{}/TACPLUS".format(json_namespace), "value": { "global": TACACS_ADD_CONFIG } @@ -340,11 +345,12 @@ def tacacs_global_tc2_invalid_input(duthost): ("passkey", " 123"), ("timeout", "0") ] + json_namespace = '/localhost' if duthost.is_multi_asic else '' for tacacs_global_type, invalid_input in xfail_input: json_patch = [ { "op": "add", - "path": "/TACPLUS", + "path": "{}/TACPLUS".format(json_namespace), "value": { "global": { tacacs_global_type: invalid_input @@ -366,10 +372,11 @@ def tacacs_global_tc2_invalid_input(duthost): def tacacs_global_tc2_duplicate_input(duthost): """ Test tacacs global duplicate input """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/TACPLUS", 
+ "path": "{}/TACPLUS".format(json_namespace), "value": { "global": TACACS_ADD_CONFIG } @@ -394,10 +401,11 @@ def tacacs_global_tc2_duplicate_input(duthost): def tacacs_global_tc2_remove(duthost): """ Test tacacs global config removal """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/TACPLUS" + "path": "{}/TACPLUS".format(json_namespace) } ] tmpfile = generate_tmpfile(duthost) @@ -419,6 +427,7 @@ def test_tc2_tacacs_global_suite(rand_selected_dut): """ This test is for default setting when configDB doesn't contian TACACS table. So we remove TACACS config at first. """ + aaa_add_init_config_without_table(rand_selected_dut) tacacs_add_init_config_without_table(rand_selected_dut) tacacs_global_tc2_add_config(rand_selected_dut) @@ -431,10 +440,11 @@ def tacacs_server_tc3_add_init(duthost): """ Test tacacs server addition """ ip_address, ipv6_address = DEFAULT_TACACS_SERVER, "fc10::21" + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/TACPLUS_SERVER", + "path": "{}/TACPLUS_SERVER".format(json_namespace), "value": { ip_address: TACACS_SERVER_OPTION, @@ -471,12 +481,12 @@ def tacacs_server_tc3_add_max(duthost): """ # 2 servers exist. Add another 7 servers to exceed max. 
servers = ["10.0.0.{}".format(i) for i in range(10, 17)] - + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [] for server in servers: patch = { "op": "add", - "path": "/TACPLUS_SERVER/{}".format(server), + "path": "{}/TACPLUS_SERVER/{}".format(json_namespace, server), "value": {} } json_patch.append(patch) @@ -509,11 +519,12 @@ def tacacs_server_tc3_replace_invalid(duthost): ("tcp_port", "65536"), ("timeout", "0") ] + json_namespace = '/localhost' if duthost.is_multi_asic else '' for tacacs_server_options, invalid_input in xfail_input: json_patch = [ { "op": "replace", - "path": "/TACPLUS_SERVER", + "path": "{}/TACPLUS_SERVER".format(json_namespace), "value": { DEFAULT_TACACS_SERVER: { tacacs_server_options: invalid_input @@ -535,10 +546,11 @@ def tacacs_server_tc3_replace_invalid(duthost): def tacacs_server_tc3_add_duplicate(duthost): """ Test tacacs server add duplicate server """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/TACPLUS_SERVER/{}".format(DEFAULT_TACACS_SERVER), + "path": "{}/TACPLUS_SERVER/{}".format(json_namespace, DEFAULT_TACACS_SERVER), "value": TACACS_SERVER_OPTION } ] @@ -560,10 +572,11 @@ def tacacs_server_tc3_add_duplicate(duthost): def tacacs_server_tc3_remove(duthost): """ Test tacasc server removal """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/TACPLUS_SERVER" + "path": "{}/TACPLUS_SERVER".format(json_namespace) } ] @@ -584,6 +597,7 @@ def test_tacacs_server_tc3_suite(rand_selected_dut): """ Due to kvm t0 and testbed t0 has different tacacs server predefined, so we cleanup tacacs servers then test on mannual setup. 
""" + cleanup_tacacs_server(rand_selected_dut) tacacs_server_tc3_add_init(rand_selected_dut) tacacs_server_tc3_add_max(rand_selected_dut) diff --git a/tests/generic_config_updater/test_bgp_prefix.py b/tests/generic_config_updater/test_bgp_prefix.py index 3f40de54ed9..4d47afbb819 100644 --- a/tests/generic_config_updater/test_bgp_prefix.py +++ b/tests/generic_config_updater/test_bgp_prefix.py @@ -8,7 +8,7 @@ from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ - pytest.mark.topology('t1'), # It is a t1 only feature + pytest.mark.topology('t1', 't2'), ] logger = logging.getLogger(__name__) @@ -75,19 +75,21 @@ def setup_env(duthosts, rand_one_dut_hostname): delete_checkpoint(duthost) -def bgp_prefix_test_setup(duthost): +def bgp_prefix_test_setup(duthost, namespace=None): """ Clean up bgp prefix config before test """ - cmds = 'sonic-db-cli CONFIG_DB del "BGP_ALLOWED_PREFIXES|*"' + namespace_prefix = '' if namespace is None else '-n ' + namespace + cmds = 'sonic-db-cli {} CONFIG_DB del "BGP_ALLOWED_PREFIXES|*"'.format(namespace_prefix) output = duthost.shell(cmds) pytest_assert(not output['rc'], "bgp prefix test setup failed.") -def show_bgp_running_config(duthost): - return duthost.shell("show runningconfiguration bgp")['stdout'] +def show_bgp_running_config(duthost, namespace=None): + namespace_prefix = '' if namespace is None else '-n ' + namespace + return duthost.shell("show runningconfiguration bgp {}".format(namespace_prefix))['stdout'] -def bgp_prefix_tc1_add_config(duthost, community, community_table): +def bgp_prefix_tc1_add_config(duthost, community, community_table, namespace=None): """ Test to add prefix config Sample output of runningconfiguration bgp after config @@ -99,10 +101,12 @@ def bgp_prefix_tc1_add_config(duthost, community, community_table): ip prefix-list PL_ALLOW_LIST_DEPLOYMENT_ID_0_COMMUNITY_1010:1010_V4 seq 30 permit 10.20.0.0/16 le 32 ipv6 prefix-list 
PL_ALLOW_LIST_DEPLOYMENT_ID_0_COMMUNITY_1010:1010_V6 seq 40 permit fc02:20::/64 le 128 """ + + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/BGP_ALLOWED_PREFIXES", + "path": "{}/BGP_ALLOWED_PREFIXES".format(json_namespace), "value": { "DEPLOYMENT_ID|0{}".format(community_table): { "prefixes_v4": [ @@ -123,7 +127,7 @@ def bgp_prefix_tc1_add_config(duthost, community, community_table): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - bgp_config = show_bgp_running_config(duthost) + bgp_config = show_bgp_running_config(duthost, namespace) pytest_assert(re.search(PREFIXES_V4_RE.format(community, PREFIXES_V4_INIT), bgp_config), "Failed to add bgp prefix v4 config.") pytest_assert(re.search(PREFIXES_V6_RE.format(community, PREFIXES_V6_INIT), bgp_config), @@ -133,7 +137,7 @@ def bgp_prefix_tc1_add_config(duthost, community, community_table): delete_tmpfile(duthost, tmpfile) -def bgp_prefix_tc1_xfail(duthost, community_table): +def bgp_prefix_tc1_xfail(duthost, community_table, namespace=None): """ Test input with invalid prefixes """ xfail_input = [ @@ -142,16 +146,19 @@ def bgp_prefix_tc1_xfail(duthost, community_table): ("remove", PREFIXES_V4_DUMMY, PREFIXES_V6_INIT), # Unexisted v4 prefix ("remove", PREFIXES_V4_INIT, PREFIXES_V6_DUMMY) # Unexisted v6 prefix ] + json_namespace = '' if namespace is None else '/' + namespace for op, prefixes_v4, prefixes_v6 in xfail_input: json_patch = [ { "op": op, - "path": "/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v6/0".format(community_table), + "path": "{}/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v6/0".format( + json_namespace, community_table), "value": prefixes_v6 }, { "op": op, - "path": "/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v4/0".format(community_table), + "path": "{}/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v4/0".format( + json_namespace, community_table), "value": prefixes_v4 } ] 
@@ -167,18 +174,19 @@ def bgp_prefix_tc1_xfail(duthost, community_table): delete_tmpfile(duthost, tmpfile) -def bgp_prefix_tc1_replace(duthost, community, community_table): +def bgp_prefix_tc1_replace(duthost, community, community_table, namespace=None): """ Test to replace prefixes """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "replace", - "path": "/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v6/0".format(community_table), + "path": "{}/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v6/0".format(json_namespace, community_table), "value": PREFIXES_V6_DUMMY }, { "op": "replace", - "path": "/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v4/0".format(community_table), + "path": "{}/BGP_ALLOWED_PREFIXES/DEPLOYMENT_ID|0{}/prefixes_v4/0".format(json_namespace, community_table), "value": PREFIXES_V4_DUMMY } ] @@ -190,7 +198,7 @@ def bgp_prefix_tc1_replace(duthost, community, community_table): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - bgp_config = show_bgp_running_config(duthost) + bgp_config = show_bgp_running_config(duthost, namespace) pytest_assert( not re.search(PREFIXES_V4_RE.format(community, PREFIXES_V4_INIT), bgp_config) and re.search(PREFIXES_V4_RE.format(community, PREFIXES_V4_DUMMY), bgp_config), @@ -206,13 +214,14 @@ def bgp_prefix_tc1_replace(duthost, community, community_table): delete_tmpfile(duthost, tmpfile) -def bgp_prefix_tc1_remove(duthost, community): +def bgp_prefix_tc1_remove(duthost, community, namespace=None): """ Test to remove prefix config """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/BGP_ALLOWED_PREFIXES" + "path": "{}/BGP_ALLOWED_PREFIXES".format(json_namespace) } ] @@ -223,7 +232,7 @@ def bgp_prefix_tc1_remove(duthost, community): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - bgp_config = 
show_bgp_running_config(duthost) + bgp_config = show_bgp_running_config(duthost, namespace) pytest_assert( not re.search(PREFIXES_V4_RE.format(community, PREFIXES_V4_DUMMY), bgp_config), "Failed to remove bgp prefix v4 config." @@ -238,17 +247,18 @@ def bgp_prefix_tc1_remove(duthost, community): @pytest.mark.parametrize("community", ["empty", "1010:1010"]) -def test_bgp_prefix_tc1_suite(rand_selected_dut, community): +def test_bgp_prefix_tc1_suite(rand_selected_dut, rand_asic_namespace, community): """ Test suite for bgp prefix for v4 and v6 w/ and w/o community ID Sample CONFIG_DB entry: BGP_ALLOWED_PREFIXES|DEPLOYMENT_ID|0 BGP_ALLOWED_PREFIXES|DEPLOYMENT_ID|0|1010:1010 """ + asic_namespace, asic_id = rand_asic_namespace community_table = "" if community == "empty" else "|" + community - bgp_prefix_test_setup(rand_selected_dut) - bgp_prefix_tc1_add_config(rand_selected_dut, community, community_table) - bgp_prefix_tc1_xfail(rand_selected_dut, community_table) - bgp_prefix_tc1_replace(rand_selected_dut, community, community_table) - bgp_prefix_tc1_remove(rand_selected_dut, community) + bgp_prefix_test_setup(rand_selected_dut, namespace=asic_namespace) + bgp_prefix_tc1_add_config(rand_selected_dut, community, community_table, namespace=asic_namespace) + bgp_prefix_tc1_xfail(rand_selected_dut, community_table, namespace=asic_namespace) + bgp_prefix_tc1_replace(rand_selected_dut, community, community_table, namespace=asic_namespace) + bgp_prefix_tc1_remove(rand_selected_dut, community, namespace=asic_namespace) diff --git a/tests/generic_config_updater/test_bgpl.py b/tests/generic_config_updater/test_bgpl.py index b42ad26e662..b7e722f14b0 100644 --- a/tests/generic_config_updater/test_bgpl.py +++ b/tests/generic_config_updater/test_bgpl.py @@ -10,16 +10,17 @@ from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload pytestmark = [ - pytest.mark.topology('t0', 'm0', 'mx'), + pytest.mark.topology('t0', 'm0', 'mx', 't2'), ] logger = 
+    cmds = 'sonic-db-cli {0} CONFIG_DB keys "BGP_MONITORS|*" | xargs -r sonic-db-cli {0} CONFIG_DB del'.format(
+        namespace_prefix)
"show ip bgp summary | grep -w {}".format(addr) + namespace_prefix = '' if namespace is None else '-n ' + namespace + cmds = "show ip bgp summary {} | grep -w {}".format(namespace_prefix, addr) output = duthost.shell(cmds) pytest_assert(not output['rc'], "BGPMonitor with addr {} is not being setup.".format(addr)) -def bgpmon_tc1_add_init(duthost, bgpmon_setup_info): +def bgpmon_tc1_add_init(duthost, bgpmon_setup_info, namespace=None): """ Test to add initial bgpmon config Make sure bgpmon is cleaned up for current topo. Then test to add initial setup for bgpmon. """ - bgpmon_cleanup_config(duthost) + bgpmon_cleanup_config(duthost, namespace) peer_addr, local_addr, bgp_asn = bgpmon_setup_info + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/BGP_MONITORS", + "path": "{}/BGP_MONITORS".format(json_namespace), "value": { peer_addr: { "admin_status": "up", @@ -121,19 +127,20 @@ def bgpmon_tc1_add_init(duthost, bgpmon_setup_info): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - check_bgpmon_with_addr(duthost, peer_addr) + check_bgpmon_with_addr(duthost, peer_addr, namespace) finally: delete_tmpfile(duthost, tmpfile) -def bgpmon_tc1_add_duplicate(duthost, bgpmon_setup_info): +def bgpmon_tc1_add_duplicate(duthost, bgpmon_setup_info, namespace=None): """ Test to add duplicate config to bgpmon """ peer_addr, local_addr, bgp_asn = bgpmon_setup_info + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/BGP_MONITORS/{}".format(peer_addr), + "path": "{}/BGP_MONITORS/{}".format(json_namespace, peer_addr), "value": { "admin_status": "up", "asn": bgp_asn, @@ -154,19 +161,20 @@ def bgpmon_tc1_add_duplicate(duthost, bgpmon_setup_info): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - check_bgpmon_with_addr(duthost, peer_addr) + check_bgpmon_with_addr(duthost, 
peer_addr, namespace) finally: delete_tmpfile(duthost, tmpfile) -def bgpmon_tc1_admin_change(duthost, bgpmon_setup_info): +def bgpmon_tc1_admin_change(duthost, bgpmon_setup_info, namespace=None): """ Test to admin down bgpmon config """ peer_addr, _, _ = bgpmon_setup_info + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "replace", - "path": "/BGP_MONITORS/{}/admin_status".format(peer_addr), + "path": "{}/BGP_MONITORS/{}/admin_status".format(json_namespace, peer_addr), "value": "down" } ] @@ -186,20 +194,21 @@ def bgpmon_tc1_admin_change(duthost, bgpmon_setup_info): delete_tmpfile(duthost, tmpfile) -def bgpmon_tc1_ip_change(duthost, bgpmon_setup_info): +def bgpmon_tc1_ip_change(duthost, bgpmon_setup_info, namespace=None): """ Test to replace bgpmon ip address """ peer_addr, local_addr, bgp_asn = bgpmon_setup_info peer_addr_replaced = generate_ip_through_default_route(duthost, [IPNetwork(peer_addr).ip]) peer_addr_replaced = str(IPNetwork(peer_addr_replaced).ip) + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/BGP_MONITORS/{}".format(peer_addr) + "path": "{}/BGP_MONITORS/{}".format(json_namespace, peer_addr) }, { "op": "add", - "path": "/BGP_MONITORS/{}".format(peer_addr_replaced), + "path": "{}/BGP_MONITORS/{}".format(json_namespace, peer_addr_replaced), "value": { "admin_status": "up", "asn": bgp_asn, @@ -220,18 +229,20 @@ def bgpmon_tc1_ip_change(duthost, bgpmon_setup_info): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - check_bgpmon_with_addr(duthost, peer_addr_replaced) + check_bgpmon_with_addr(duthost, peer_addr_replaced, namespace) finally: delete_tmpfile(duthost, tmpfile) -def bgpmon_tc1_remove(duthost): +def bgpmon_tc1_remove(duthost, namespace=None): """ Test to remove bgpmon config """ + namespace_prefix = '' if namespace is None else '-n ' + namespace + json_namespace = '' if namespace is None else 
'/' + namespace json_patch = [ { "op": "remove", - "path": "/BGP_MONITORS" + "path": "{}/BGP_MONITORS".format(json_namespace) } ] @@ -242,18 +253,19 @@ def bgpmon_tc1_remove(duthost): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - output = duthost.shell("show ip bgp summary") + output = duthost.shell("show ip bgp summary {}".format(namespace_prefix)) pytest_assert(not output['rc'], "Failed to get info from BGP summary") pytest_assert("BGPMonitor" not in output['stdout'], "Failed to remove BGPMonitor") finally: delete_tmpfile(duthost, tmpfile) -def test_bgpmon_tc1_add_and_remove(rand_selected_dut, bgpmon_setup_info): +def test_bgpmon_tc1_add_and_remove(rand_selected_dut, bgpmon_setup_info, rand_asic_namespace): """ Test to verify bgpmon config addition and deletion """ - bgpmon_tc1_add_init(rand_selected_dut, bgpmon_setup_info) - bgpmon_tc1_add_duplicate(rand_selected_dut, bgpmon_setup_info) - bgpmon_tc1_admin_change(rand_selected_dut, bgpmon_setup_info) - bgpmon_tc1_ip_change(rand_selected_dut, bgpmon_setup_info) - bgpmon_tc1_remove(rand_selected_dut) + asic_namespace, asic_id = rand_asic_namespace + bgpmon_tc1_add_init(rand_selected_dut, bgpmon_setup_info, asic_namespace) + bgpmon_tc1_add_duplicate(rand_selected_dut, bgpmon_setup_info, asic_namespace) + bgpmon_tc1_admin_change(rand_selected_dut, bgpmon_setup_info, asic_namespace) + bgpmon_tc1_ip_change(rand_selected_dut, bgpmon_setup_info, asic_namespace) + bgpmon_tc1_remove(rand_selected_dut, asic_namespace) diff --git a/tests/generic_config_updater/test_cacl.py b/tests/generic_config_updater/test_cacl.py index 2631db44598..17e93c5788a 100644 --- a/tests/generic_config_updater/test_cacl.py +++ b/tests/generic_config_updater/test_cacl.py @@ -18,24 +18,27 @@ # SSH_ONLY CTRLPLANE SSH SSH_ONLY ingress pytestmark = [ - pytest.mark.topology('t0', 'm0', 'mx', 't1'), + pytest.mark.topology('t0', 'm0', 'mx', 't1', 't2'), ] logger = logging.getLogger(__name__) 
-def get_cacl_tables(duthost): - """Get acl control palne tables +def get_cacl_tables(duthost, namespace=None): + """Get acl control plane tables """ - cmds = "show acl table | grep -w CTRLPLANE | awk '{print $1}'" + namespace_prefix = '' if namespace is None else 'sudo ip netns exec ' + namespace + cmds = "{} show acl table | grep -w CTRLPLANE | awk '{{print $1}}'".format(namespace_prefix) + output = duthost.shell(cmds) pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) cacl_tables = output['stdout'].splitlines() return cacl_tables -def get_iptable_rules(duthost): - cmds = "iptables -S" +def get_iptable_rules(duthost, namespace=None): + namespace_prefix = '' if namespace is None else 'sudo ip netns exec ' + namespace + cmds = "{} iptables -S".format(namespace_prefix) output = duthost.shell(cmds) pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) rules_chain = output['stdout'].splitlines() @@ -55,16 +58,18 @@ def disable_port_toggle(duthosts, tbinfo): @pytest.fixture(autouse=True) -def setup_env(duthosts, rand_one_dut_hostname): +def setup_env(duthosts, rand_one_dut_hostname, rand_asic_namespace): """ Setup/teardown fixture for acl config Args: duthosts: list of DUTs. rand_selected_dut: The fixture returns a randomly selected DuT. + rand_asic_namespace: The fixture returns a randomly selected asic namespace. 
""" duthost = duthosts[rand_one_dut_hostname] - original_iptable_rules = get_iptable_rules(duthost) - original_cacl_tables = get_cacl_tables(duthost) + namespace, asic_id = rand_asic_namespace + original_iptable_rules = get_iptable_rules(duthost, namespace) + original_cacl_tables = get_cacl_tables(duthost, namespace) create_checkpoint(duthost) yield @@ -73,7 +78,7 @@ def setup_env(duthosts, rand_one_dut_hostname): logger.info("Rolled back to original checkpoint") rollback_or_reload(duthost) - current_iptable_rules = get_iptable_rules(duthost) + current_iptable_rules = get_iptable_rules(duthost, namespace) logger.info("original iptable rules: {}, current iptable rules: {}".format( original_iptable_rules, current_iptable_rules) ) @@ -87,7 +92,7 @@ def setup_env(duthosts, rand_one_dut_hostname): iptable_rules_diff) ) - current_cacl_tables = get_cacl_tables(duthost) + current_cacl_tables = get_cacl_tables(duthost, namespace) logger.info("original cacl tables: {}, current cacl tables: {}".format( original_cacl_tables, current_cacl_tables) ) @@ -104,10 +109,11 @@ def setup_env(duthosts, rand_one_dut_hostname): delete_checkpoint(duthost) -def expect_acl_table_match(duthost, table_name, expected_content_list): +def expect_acl_table_match(duthost, table_name, expected_content_list, namespace=None): """Check if acl table show as expected """ - cmds = "show acl table {}".format(table_name) + namespace_prefix = '' if namespace is None else 'sudo ip netns exec ' + namespace + cmds = "{} show acl table {}".format(namespace_prefix, table_name) output = duthost.shell(cmds) pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) @@ -123,18 +129,19 @@ def expect_acl_table_match(duthost, table_name, expected_content_list): pytest_assert(set(expected_content_list) == set(actual_list), "ACL table doesn't match") -def expect_res_success_acl_rule(duthost, expected_content_list, unexpected_content_list): +def expect_res_success_acl_rule(duthost, 
expected_content_list, unexpected_content_list, namespace=None): """Check if acl rule added as expected """ time.sleep(1) # Sleep 1 sec to ensure caclmgrd does update in case of its UPDATE_DELAY_SECS 0.5s - cmds = "iptables -S" + namespace_prefix = '' if namespace is None else 'sudo ip netns exec ' + namespace + cmds = "{} iptables -S".format(namespace_prefix) output = duthost.shell(cmds) pytest_assert(not output['rc'], "'{}' failed with rc={}".format(cmds, output['rc'])) expect_res_success(duthost, output, expected_content_list, unexpected_content_list) -def cacl_tc1_add_new_table(duthost, protocol): +def cacl_tc1_add_new_table(duthost, protocol, namespace=None): """ Add acl table for test Sample output @@ -144,10 +151,11 @@ def cacl_tc1_add_new_table(duthost, protocol): SNMP_TEST_1 CTRLPLANE SNMP SNMP_Test_Table_1 ingress Active """ table = "{}_TEST_1".format(protocol) + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_TABLE/{}".format(table), + "path": "{}/ACL_TABLE/{}".format(json_namespace, table), "value": { "policy_desc": "{}_Test_Table_1".format(protocol), "services": [ @@ -167,22 +175,23 @@ def cacl_tc1_add_new_table(duthost, protocol): expect_op_success(duthost, output) expected_content_list = [table, "CTRLPLANE", protocol, "{}_Test_Table_1".format(protocol), "ingress"] - expect_acl_table_match(duthost, table, expected_content_list) + expect_acl_table_match(duthost, table, expected_content_list, namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc1_add_duplicate_table(duthost, protocol): +def cacl_tc1_add_duplicate_table(duthost, protocol, namespace=None): """ Add duplicate acl table """ if protocol == 'SSH': table_name = "SSH_ONLY" else: table_name = "{}_ACL".format(protocol) + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_TABLE/{}".format(table_name), + "path": "{}/ACL_TABLE/{}".format(json_namespace, table_name), 
"value": { "policy_desc": table_name, "services": [ @@ -204,7 +213,7 @@ def cacl_tc1_add_duplicate_table(duthost, protocol): delete_tmpfile(duthost, tmpfile) -def cacl_tc1_replace_table_variable(duthost, protocol): +def cacl_tc1_replace_table_variable(duthost, protocol, namespace=None): """ Replace acl table with SSH service Expected output @@ -213,22 +222,23 @@ def cacl_tc1_replace_table_variable(duthost, protocol): ---------- --------- --------------- ------------- ------- SNMP_ACL CTRLPLANE SNMP SNMP_TO_SSH egress """ + json_namespace = '' if namespace is None else '/' + namespace if protocol == 'SSH': table_name = "SSH_ONLY" json_patch = [ { "op": "replace", - "path": "/ACL_TABLE/{}/stage".format(table_name), + "path": "{}/ACL_TABLE/{}/stage".format(json_namespace, table_name), "value": "egress" }, { "op": "replace", - "path": "/ACL_TABLE/{}/services/0".format(table_name), + "path": "{}/ACL_TABLE/{}/services/0".format(json_namespace, table_name), "value": "NTP" }, { "op": "replace", - "path": "/ACL_TABLE/{}/policy_desc".format(table_name), + "path": "{}/ACL_TABLE/{}/policy_desc".format(json_namespace, table_name), "value": "{}_TO_NTP".format(protocol) } ] @@ -237,17 +247,17 @@ def cacl_tc1_replace_table_variable(duthost, protocol): json_patch = [ { "op": "replace", - "path": "/ACL_TABLE/{}/stage".format(table_name), + "path": "{}/ACL_TABLE/{}/stage".format(json_namespace, table_name), "value": "egress" }, { "op": "replace", - "path": "/ACL_TABLE/{}/services/0".format(table_name), + "path": "{}/ACL_TABLE/{}/services/0".format(json_namespace, table_name), "value": "SSH" }, { "op": "replace", - "path": "/ACL_TABLE/{}/policy_desc".format(table_name), + "path": "{}/ACL_TABLE/{}/policy_desc".format(json_namespace, table_name), "value": "{}_TO_SSH".format(protocol) } ] @@ -264,12 +274,12 @@ def cacl_tc1_replace_table_variable(duthost, protocol): else: expected_content_list = [table_name, "CTRLPLANE", "SSH", "{}_TO_SSH".format(protocol), "egress"] - 
expect_acl_table_match(duthost, table_name, expected_content_list) + expect_acl_table_match(duthost, table_name, expected_content_list, namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc1_add_invalid_table(duthost, protocol): +def cacl_tc1_add_invalid_table(duthost, protocol, namespace=None): """ Add invalid acl table {"service": "SSH", "stage": "ogress", "type": "CTRLPLANE"}, # wrong stage @@ -279,12 +289,12 @@ def cacl_tc1_add_invalid_table(duthost, protocol): {"service": protocol, "stage": "ogress", "type": "CTRLPLANE"}, {"service": protocol, "stage": "ingress", "type": "TRLPLANE"} ] - + json_namespace = '' if namespace is None else '/' + namespace for ele in invalid_table: json_patch = [ { "op": "add", - "path": "/ACL_TABLE/TEST_2", + "path": "{}/ACL_TABLE/TEST_2".format(json_namespace), "value": { "policy_desc": "Test_Table_2", "services": [ @@ -306,13 +316,14 @@ def cacl_tc1_add_invalid_table(duthost, protocol): delete_tmpfile(duthost, tmpfile) -def cacl_tc1_remove_unexisted_table(duthost): +def cacl_tc1_remove_unexisted_table(duthost, namespace=None): """ Remove unexisted acl table """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/ACL_RULE/SSH_ONLY_UNEXISTED" + "path": "{}/ACL_RULE/SSH_ONLY_UNEXISTED".format(json_namespace) } ] @@ -325,17 +336,18 @@ def cacl_tc1_remove_unexisted_table(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc1_remove_table(duthost, protocol): +def cacl_tc1_remove_table(duthost, protocol, namespace=None): """ Remove acl table test """ if protocol == 'SSH': table_name = "SSH_ONLY" else: table_name = "{}_ACL".format(protocol) + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/ACL_TABLE/{}".format(table_name) + "path": "{}/ACL_TABLE/{}".format(json_namespace, table_name) } ] @@ -346,12 +358,12 @@ def cacl_tc1_remove_table(duthost, protocol): output = apply_patch(duthost, json_data=json_patch, 
dest_file=tmpfile) expect_op_success(duthost, output) - expect_acl_table_match(duthost, table_name, []) + expect_acl_table_match(duthost, table_name, [], namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc2_add_init_rule(duthost, protocol): +def cacl_tc2_add_init_rule(duthost, protocol, namespace=None): """ Add acl rule for test Check 'ip tables' to make sure rule is actually being applied @@ -383,10 +395,11 @@ def cacl_tc2_add_init_rule(duthost, protocol): params_dict["table"] = "EXTERNAL_CLIENT_ACL" params_dict["IP_PROTOCOL"] = "6" params_dict["L4_DST_PORT"] = "8081" + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_RULE", + "path": "{}/ACL_RULE".format(json_namespace), "value": { "{}|TEST_DROP".format(params_dict["table"]): { "IP_PROTOCOL": "{}".format(params_dict["IP_PROTOCOL"]), @@ -414,12 +427,12 @@ def cacl_tc2_add_init_rule(duthost, protocol): "-A INPUT -s 9.9.9.9/32 -p udp -m udp --dport 161 -j DROP"] elif protocol == 'EXTERNAL_CLIENT': expected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 8081 -j DROP"] - expect_res_success_acl_rule(duthost, expected_content_list, []) + expect_res_success_acl_rule(duthost, expected_content_list, [], namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc2_add_duplicate_rule(duthost, protocol): +def cacl_tc2_add_duplicate_rule(duthost, protocol, namespace=None): """ Add duplicate acl rule for test """ params_dict = {} @@ -440,10 +453,11 @@ def cacl_tc2_add_duplicate_rule(duthost, protocol): params_dict["table"] = "EXTERNAL_CLIENT_ACL" params_dict["IP_PROTOCOL"] = "6" params_dict["L4_DST_PORT"] = "8081" + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_RULE", + "path": "{}/ACL_RULE".format(json_namespace), "value": { "{}|TEST_DROP".format(params_dict["table"]): { "IP_PROTOCOL": "{}".format(params_dict["IP_PROTOCOL"]), @@ -467,7 +481,7 @@ def 
cacl_tc2_add_duplicate_rule(duthost, protocol): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_replace_rule(duthost, protocol): +def cacl_tc2_replace_rule(duthost, protocol, namespace=None): """ Replace a value from acl rule test Check 'ip tables' to make sure rule is actually being applied @@ -488,10 +502,11 @@ def cacl_tc2_replace_rule(duthost, protocol): table = 'NTP_ACL' elif protocol == 'EXTERNAL_CLIENT': table = 'EXTERNAL_CLIENT_ACL' + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "replace", - "path": "/ACL_RULE/{}|TEST_DROP/SRC_IP".format(table), + "path": "{}/ACL_RULE/{}|TEST_DROP/SRC_IP".format(json_namespace, table), "value": "8.8.8.8/32" } ] @@ -515,18 +530,19 @@ def cacl_tc2_replace_rule(duthost, protocol): elif protocol == 'EXTERNAL_CLIENT': expected_content_list = ["-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 8081 -j DROP"] unexpected_content_list = ["-A INPUT -s 9.9.9.9/32 -p tcp -m tcp --dport 8081 -j DROP"] - expect_res_success_acl_rule(duthost, expected_content_list, unexpected_content_list) + expect_res_success_acl_rule(duthost, expected_content_list, unexpected_content_list, namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_tc2_add_rule_to_unexisted_table(duthost): +def cacl_tc2_add_rule_to_unexisted_table(duthost, namespace=None): """ Add acl rule to unexisted table """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_RULE/TEST_2|TEST_DROP", + "path": "{}/ACL_RULE/TEST_2|TEST_DROP".format(json_namespace), "value": { "L4_DST_PORT": "22", "IP_PROTOCOL": "6", @@ -548,7 +564,7 @@ def cacl_tc2_add_rule_to_unexisted_table(duthost): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_remove_table_before_rule(duthost, protocol): +def cacl_tc2_remove_table_before_rule(duthost, protocol, namespace=None): """ Remove acl table before removing acl rule """ if protocol == 'SSH': @@ -559,10 +575,11 @@ def cacl_tc2_remove_table_before_rule(duthost, 
protocol): table = 'NTP_ACL' elif protocol == 'EXTERNAL_CLIENT': table = 'EXTERNAL_CLIENT_ACL' + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/ACL_TABLE/{}".format(table) + "path": "{}/ACL_TABLE/{}".format(json_namespace, table) } ] @@ -576,7 +593,7 @@ def cacl_tc2_remove_table_before_rule(duthost, protocol): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_remove_unexist_rule(duthost, protocol): +def cacl_tc2_remove_unexist_rule(duthost, protocol, namespace=None): """ Remove unexisted acl rule """ if protocol == 'SSH': @@ -587,10 +604,11 @@ def cacl_tc2_remove_unexist_rule(duthost, protocol): table = 'NTP_ACL' elif protocol == 'EXTERNAL_CLIENT': table = 'EXTERNAL_CLIENT_ACL' + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/ACL_RULE/{}|TEST_DROP2".format(table) + "path": "{}/ACL_RULE/{}|TEST_DROP2".format(json_namespace, table) } ] tmpfile = generate_tmpfile(duthost) @@ -602,13 +620,14 @@ def cacl_tc2_remove_unexist_rule(duthost, protocol): delete_tmpfile(duthost, tmpfile) -def cacl_tc2_remove_rule(duthost): +def cacl_tc2_remove_rule(duthost, namespace=None): """ Remove acl rule test """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "remove", - "path": "/ACL_RULE" + "path": "{}/ACL_RULE".format(json_namespace) } ] @@ -624,12 +643,12 @@ def cacl_tc2_remove_rule(duthost): "-A INPUT -s 8.8.8.8/32 -p udp -m udp --dport 161 -j DROP", "-A INPUT -s 8.8.8.8/32 -p tcp -m udp --dport 123 -j DROP", "-A INPUT -s 8.8.8.8/32 -p tcp -m tcp --dport 8081 -j DROP"] - expect_res_success_acl_rule(duthost, [], unexpected_content_list) + expect_res_success_acl_rule(duthost, [], unexpected_content_list, namespace) finally: delete_tmpfile(duthost, tmpfile) -def cacl_external_client_add_new_table(duthost): +def cacl_external_client_add_new_table(duthost, namespace=None): """ Add acl table for test Sample output admin@vlab-01:~$ show 
acl table @@ -637,10 +656,11 @@ def cacl_external_client_add_new_table(duthost): ---------------------- --------- --------------- ---------------------------- ------- -------- EXTERNAL_CLIENT_ACL CTRLPLANE EXTERNAL_CLIENT EXTERNAL_CLIENT_ACL ingress Active """ + json_namespace = '' if namespace is None else '/' + namespace json_patch = [ { "op": "add", - "path": "/ACL_TABLE/EXTERNAL_CLIENT_ACL", + "path": "{}/ACL_TABLE/EXTERNAL_CLIENT_ACL".format(json_namespace), "value": { "policy_desc": "EXTERNAL_CLIENT_ACL", "services": [ @@ -661,7 +681,7 @@ def cacl_external_client_add_new_table(duthost): expected_content_list = ["EXTERNAL_CLIENT_ACL", "CTRLPLANE", "EXTERNAL_CLIENT", "EXTERNAL_CLIENT_ACL", "ingress"] - expect_acl_table_match(duthost, "EXTERNAL_CLIENT_ACL", expected_content_list) + expect_acl_table_match(duthost, "EXTERNAL_CLIENT_ACL", expected_content_list, namespace) finally: delete_tmpfile(duthost, tmpfile) @@ -674,25 +694,27 @@ def cacl_protocol(request): # noqa F811 return request.param -def test_cacl_tc1_acl_table_suite(cacl_protocol, rand_selected_dut): +def test_cacl_tc1_acl_table_suite(cacl_protocol, rand_selected_dut, rand_asic_namespace): + namespace, asic_id = rand_asic_namespace logger.info("Test acl table for protocol {}".format(cacl_protocol)) - cacl_tc1_add_new_table(rand_selected_dut, cacl_protocol) - cacl_tc1_add_duplicate_table(rand_selected_dut, cacl_protocol) - cacl_tc1_replace_table_variable(rand_selected_dut, cacl_protocol) - cacl_tc1_add_invalid_table(rand_selected_dut, cacl_protocol) - cacl_tc1_remove_unexisted_table(rand_selected_dut) - cacl_tc1_remove_table(rand_selected_dut, cacl_protocol) + cacl_tc1_add_new_table(rand_selected_dut, cacl_protocol, namespace) + cacl_tc1_add_duplicate_table(rand_selected_dut, cacl_protocol, namespace) + cacl_tc1_replace_table_variable(rand_selected_dut, cacl_protocol, namespace) + cacl_tc1_add_invalid_table(rand_selected_dut, cacl_protocol, namespace) + cacl_tc1_remove_unexisted_table(rand_selected_dut, 
namespace) + cacl_tc1_remove_table(rand_selected_dut, cacl_protocol, namespace) # ACL_RULE tests are related. So group them into one test. -def test_cacl_tc2_acl_rule_test(cacl_protocol, rand_selected_dut): +def test_cacl_tc2_acl_rule_test(cacl_protocol, rand_selected_dut, rand_asic_namespace): + namespace, asic_id = rand_asic_namespace logger.info("Test acl table for protocol {}".format(cacl_protocol)) if cacl_protocol == 'EXTERNAL_CLIENT': - cacl_external_client_add_new_table(rand_selected_dut) - cacl_tc2_add_init_rule(rand_selected_dut, cacl_protocol) - cacl_tc2_add_duplicate_rule(rand_selected_dut, cacl_protocol) - cacl_tc2_replace_rule(rand_selected_dut, cacl_protocol) - cacl_tc2_add_rule_to_unexisted_table(rand_selected_dut) - cacl_tc2_remove_table_before_rule(rand_selected_dut, cacl_protocol) - cacl_tc2_remove_unexist_rule(rand_selected_dut, cacl_protocol) - cacl_tc2_remove_rule(rand_selected_dut) + cacl_external_client_add_new_table(rand_selected_dut, namespace) + cacl_tc2_add_init_rule(rand_selected_dut, cacl_protocol, namespace) + cacl_tc2_add_duplicate_rule(rand_selected_dut, cacl_protocol, namespace) + cacl_tc2_replace_rule(rand_selected_dut, cacl_protocol, namespace) + cacl_tc2_add_rule_to_unexisted_table(rand_selected_dut, namespace) + cacl_tc2_remove_table_before_rule(rand_selected_dut, cacl_protocol, namespace) + cacl_tc2_remove_unexist_rule(rand_selected_dut, cacl_protocol, namespace) + cacl_tc2_remove_rule(rand_selected_dut, namespace) diff --git a/tests/generic_config_updater/test_ecn_config_update.py b/tests/generic_config_updater/test_ecn_config_update.py index a3459253981..5cf916a2b6b 100644 --- a/tests/generic_config_updater/test_ecn_config_update.py +++ b/tests/generic_config_updater/test_ecn_config_update.py @@ -44,7 +44,7 @@ def ensure_dut_readiness(duthost): delete_checkpoint(duthost) -def ensure_application_of_updated_config(duthost, configdb_field, values): +def ensure_application_of_updated_config(duthost, configdb_field, values, 
namespace=None): """ Ensures application of the JSON patch config update @@ -52,14 +52,20 @@ def ensure_application_of_updated_config(duthost, configdb_field, values): duthost: DUT host object configdb_field: config db field(s) under test values: expected value(s) of configdb_field + asic_index: Index id for the asic to be verified. Value is None for single asic platforms. """ def _confirm_value_in_asic_db(): - wred_objects = duthost.shell('sonic-db-cli ASIC_DB keys *WRED*')["stdout"] + namespace_prefix = '' if namespace is None else '-n ' + namespace + wred_objects = duthost.shell('sonic-db-cli {} ASIC_DB keys *WRED*'.format(namespace_prefix))["stdout"] wred_objects = wred_objects.split("\n") - if (len(wred_objects) > 1): + if len(wred_objects) > 1: for wred_object in wred_objects: - wred_data = duthost.shell('sonic-db-cli ASIC_DB hgetall {}'.format(wred_object))["stdout"] - if ('NULL' in wred_data): + wred_data = duthost.shell( + 'sonic-db-cli {} ASIC_DB hgetall {}'.format( + namespace_prefix, + wred_object) + )["stdout"] + if 'NULL' in wred_data: continue wred_data = ast.literal_eval(wred_data) for field, value in zip(configdb_field.split(','), values.split(',')): @@ -68,7 +74,8 @@ def _confirm_value_in_asic_db(): return True return False else: - wred_data = duthost.shell('sonic-db-cli ASIC_DB hgetall {}'.format(wred_objects[0]))["stdout"] + wred_data = duthost.shell('sonic-db-cli {} ASIC_DB hgetall {}'.format( + namespace_prefix, wred_objects[0]))["stdout"] wred_data = ast.literal_eval(wred_data) for field, value in zip(configdb_field.split(','), values.split(',')): if value != wred_data[WRED_MAPPING[field]]: @@ -86,14 +93,18 @@ def _confirm_value_in_asic_db(): @pytest.mark.parametrize("configdb_field", ["green_min_threshold", "green_max_threshold", "green_drop_probability", "green_min_threshold,green_max_threshold,green_drop_probability"]) @pytest.mark.parametrize("operation", ["replace"]) -def test_ecn_config_updates(duthost, ensure_dut_readiness, 
configdb_field, operation): +def test_ecn_config_updates(duthost, ensure_dut_readiness, configdb_field, operation, rand_asic_namespace): tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {} created for json patch of field: {} and operation: {}" .format(tmpfile, configdb_field, operation)) + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace json_patch = list() values = list() - ecn_data = duthost.shell('sonic-db-cli CONFIG_DB hgetall "WRED_PROFILE|AZURE_LOSSLESS"')['stdout'] + namespace_prefix = '' if asic_namespace is None else '-n ' + asic_namespace + ecn_data = duthost.shell('sonic-db-cli {} CONFIG_DB hgetall "WRED_PROFILE|AZURE_LOSSLESS"'.format( + namespace_prefix))['stdout'] ecn_data = ast.literal_eval(ecn_data) for field in configdb_field.split(','): value = int(ecn_data[field]) + 1 @@ -104,14 +115,14 @@ def test_ecn_config_updates(duthost, ensure_dut_readiness, configdb_field, opera json_patch.append( {"op": "{}".format(operation), - "path": "/WRED_PROFILE/AZURE_LOSSLESS/{}".format(field), + "path": "{}/WRED_PROFILE/AZURE_LOSSLESS/{}".format(json_namespace, field), "value": "{}".format(value)}) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) if is_valid_platform_and_version(duthost, "WRED_PROFILE", "ECN tuning", operation): expect_op_success(duthost, output) - ensure_application_of_updated_config(duthost, configdb_field, ",".join(values)) + ensure_application_of_updated_config(duthost, configdb_field, ",".join(values), namespace=asic_namespace) else: expect_op_failure(output) finally: diff --git a/tests/generic_config_updater/test_eth_interface.py b/tests/generic_config_updater/test_eth_interface.py index 7d63aaf8a95..565ee9e698d 100644 --- a/tests/generic_config_updater/test_eth_interface.py +++ b/tests/generic_config_updater/test_eth_interface.py @@ -16,6 +16,11 @@ logger = logging.getLogger(__name__) +DEFAULT_INTERFACE = { + 0: "Ethernet0", + 1: 
"Ethernet144" +} + @pytest.fixture(autouse=True) def ensure_dut_readiness(duthosts, rand_one_dut_hostname): @@ -40,8 +45,10 @@ def ensure_dut_readiness(duthosts, rand_one_dut_hostname): delete_checkpoint(duthost) -def is_valid_fec_state_db(duthost, value): - read_supported_fecs_cli = 'sonic-db-cli STATE_DB hget "PORT_TABLE|{}" supported_fecs'.format("Ethernet0") +def is_valid_fec_state_db(duthost, value, port, namespace=None): + namespace_prefix = '' if namespace is None else '-n ' + namespace + read_supported_fecs_cli = 'sonic-db-cli {} STATE_DB hget "PORT_TABLE|{}" supported_fecs'.format( + namespace_prefix, port) supported_fecs_str = duthost.shell(read_supported_fecs_cli)['stdout'] if supported_fecs_str: if supported_fecs_str != 'N/A': @@ -55,8 +62,31 @@ def is_valid_fec_state_db(duthost, value): return True -def is_valid_speed_state_db(duthost, value): - read_supported_speeds_cli = 'sonic-db-cli STATE_DB hget "PORT_TABLE|{}" supported_speeds'.format("Ethernet0") +def fec_exists_on_config_db(duthost, port, namespace=None): + """ + Check if FEC (Forward Error Correction) exists on the CONFIG_DB for a given port. + + Args: + duthost (object): The DUT (Device Under Test) host object. + port (str): The port for which FEC existence needs to be checked. + namespace (str, optional): The namespace in which the port exists. Defaults to None. + + Returns: + bool: True if FEC exists on the CONFIG_DB for the given port, False otherwise. 
+ """ + namespace_prefix = '' if namespace is None else '-n ' + namespace + read_fec = 'sonic-db-cli {} CONFIG_DB hget "PORT|{}" fec'.format(namespace_prefix, port) + read_fec_str = duthost.shell(read_fec)['stdout'] + if read_fec_str: + return True + else: + return False + + +def is_valid_speed_state_db(duthost, value, port, namespace=None): + namespace_prefix = '' if namespace is None else '-n ' + namespace + read_supported_speeds_cli = 'sonic-db-cli {} STATE_DB hget "PORT_TABLE|{}" supported_speeds'.format( + namespace_prefix, port) supported_speeds_str = duthost.shell(read_supported_speeds_cli)['stdout'] supported_speeds = [int(s) for s in supported_speeds_str.split(',') if s] if supported_speeds and int(value) not in supported_speeds: @@ -64,9 +94,9 @@ def is_valid_speed_state_db(duthost, value): return True -def check_interface_status(duthost, field, interface='Ethernet0'): +def check_interface_status(duthost, field, interface): """ - Returns current status for Ethernet0 of specified field + Returns current status for interface of specified field Args: duthost: DUT host object under test @@ -82,20 +112,28 @@ def check_interface_status(duthost, field, interface='Ethernet0'): for line in status_data: if interface in line: interface_status = line.strip() - pytest_assert(len(interface_status) > 0, "Failed to read {} interface properties".format(interface)) + pytest_assert(len(interface_status) > 0, "Failed to read {} interface properties".format( + interface)) status = re.split(r" {2,}", interface_status)[field_index] return status -def get_ethernet_port_not_in_portchannel(duthost): +def get_ethernet_port_not_in_portchannel(duthost, namespace=None): """ Returns the name of an ethernet port which is not a member of a port channel Args: duthost: DUT host object under test + namespace: DUT asic namespace. 
asic0, asic1, localhost """ - config_facts = duthost.get_running_config_facts() + port_name = "" + config_facts = duthost.config_facts( + host=duthost.hostname, + source="running", + verbose=False, + namespace=namespace + )['ansible_facts'] ports = list(config_facts['PORT'].keys()) port_channel_members = [] if 'PORTCHANNEL_MEMBER' not in config_facts: @@ -108,17 +146,21 @@ def get_ethernet_port_not_in_portchannel(duthost): port_channel_members.append(member) for port in ports: if port not in port_channel_members: + port_role = config_facts['PORT'][port].get('role') + if port_role and port_role != 'Ext': # ensure port is front-panel port + continue port_name = port break return port_name -def get_port_speeds_for_test(duthost): +def get_port_speeds_for_test(duthost, port): """ Get the speeds parameters for case test_update_speed, including 2 valid speeds and 1 invalid speed Args: duthost: DUT host object + port: The port for which speeds need to be tested """ speeds_to_test = [] invalid_speed_yang = ("20a", False) @@ -126,7 +168,7 @@ def get_port_speeds_for_test(duthost): if duthost.get_facts()['asic_type'] == 'vs': valid_speeds = ['20000', '40000'] else: - valid_speeds = duthost.get_supported_speeds('Ethernet0') + valid_speeds = duthost.get_supported_speeds(port) if valid_speeds: invalid_speed_state_db = (str(int(valid_speeds[0]) - 1), False) pytest_assert(valid_speeds, "Failed to get any valid port speed to test.") @@ -138,12 +180,18 @@ def get_port_speeds_for_test(duthost): return speeds_to_test -def test_remove_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_remove_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") + json_patch = [ { "op": 
"remove", - "path": "/PORT/Ethernet0/lanes" + "path": "{}/PORT/{}/lanes".format(json_namespace, port) } ] @@ -158,18 +206,24 @@ def test_remove_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): @pytest.mark.skip(reason="Bypass as it is blocking submodule update") -def test_replace_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_replace_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] - cur_lanes = check_interface_status(duthost, "Lanes") + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") + + cur_lanes = check_interface_status(duthost, "Lanes", port) cur_lanes = cur_lanes.split(",") cur_lanes.sort() update_lanes = cur_lanes update_lanes[-1] = str(int(update_lanes[-1]) + 1) update_lanes = ",".join(update_lanes) + json_patch = [ { "op": "replace", - "path": "/PORT/Ethernet0/lanes", + "path": "{}/PORT/{}/lanes".format(json_namespace, port), "value": "{}".format(update_lanes) } ] @@ -184,17 +238,20 @@ def test_replace_lanes(duthosts, rand_one_dut_hostname, ensure_dut_readiness): delete_tmpfile(duthost, tmpfile) -def test_replace_mtu(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_replace_mtu(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace # Can't directly change mtu of the port channel member # So find a ethernet port that are not in a port channel - port_name = get_ethernet_port_not_in_portchannel(duthost) + port_name = get_ethernet_port_not_in_portchannel(duthost, asic_namespace) + pytest_assert(port_name, "No available ethernet ports, all ports are in port channels.") 
target_mtu = "1514" json_patch = [ { "op": "replace", - "path": "/PORT/{}/mtu".format(port_name), + "path": "{}/PORT/{}/mtu".format(json_namespace, port_name), "value": "{}".format(target_mtu) } ] @@ -213,12 +270,16 @@ def test_replace_mtu(duthosts, rand_one_dut_hostname, ensure_dut_readiness): @pytest.mark.parametrize("pfc_asym", ["on", "off"]) -def test_toggle_pfc_asym(duthosts, rand_one_dut_hostname, ensure_dut_readiness, pfc_asym): +def test_toggle_pfc_asym(duthosts, rand_one_dut_hostname, ensure_dut_readiness, pfc_asym, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") json_patch = [ { "op": "replace", - "path": "/PORT/Ethernet0/pfc_asym", + "path": "{}/PORT/{}/pfc_asym".format(json_namespace, port), "value": "{}".format(pfc_asym) } ] @@ -229,7 +290,7 @@ def test_toggle_pfc_asym(duthosts, rand_one_dut_hostname, ensure_dut_readiness, try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - current_status_pfc_asym = check_interface_status(duthost, "Asym") + current_status_pfc_asym = check_interface_status(duthost, "Asym", port) pytest_assert(current_status_pfc_asym == pfc_asym, "Failed to properly configure interface Asym PFC to requested value off") finally: @@ -238,28 +299,35 @@ def test_toggle_pfc_asym(duthosts, rand_one_dut_hostname, ensure_dut_readiness, @pytest.mark.device_type('physical') @pytest.mark.parametrize("fec", ["rs", "fc"]) -def test_replace_fec(duthosts, rand_one_dut_hostname, ensure_dut_readiness, fec): +def test_replace_fec(duthosts, rand_one_dut_hostname, ensure_dut_readiness, fec, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' 
+ asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") json_patch = [ { "op": "add", - "path": "/PORT/Ethernet0/fec", + "path": "{}/PORT/{}/fec".format(json_namespace, port), "value": "{}".format(fec) } ] tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) + fec_cfg_exists = fec_exists_on_config_db(duthost, port, namespace=asic_namespace) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) - if is_valid_fec_state_db(duthost, fec): + if is_valid_fec_state_db(duthost, fec, port, namespace=asic_namespace): expect_op_success(duthost, output) - current_status_fec = check_interface_status(duthost, "FEC") + current_status_fec = check_interface_status(duthost, "FEC", port) pytest_assert(current_status_fec == fec, "Failed to properly configure interface FEC to requested value {}".format(fec)) # The rollback after the test cannot revert the fec, when fec is not configured in config_db.json - if duthost.facts['platform'] in ['x86_64-arista_7050_qx32s']: + # adding generic check to restore fec if not included in config_db.json + # keeping previous platform check for backwards compatibility + if duthost.facts['platform'] in ['x86_64-arista_7050_qx32s'] or fec_cfg_exists is False: config_reload(duthost, safe_reload=True) else: expect_op_failure(output) @@ -268,12 +336,16 @@ def test_replace_fec(duthosts, rand_one_dut_hostname, ensure_dut_readiness, fec) @pytest.mark.skip(reason="Bypass as this is not a production scenario") -def test_update_invalid_index(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_update_invalid_index(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = 
DEFAULT_INTERFACE.get(asic_index, "DefaultPort") json_patch = [ { "op": "replace", - "path": "/PORT/Ethernet0/index", + "path": "{}/PORT/{}/index".format(json_namespace, port), "value": "abc1" } ] @@ -289,15 +361,20 @@ def test_update_invalid_index(duthosts, rand_one_dut_hostname, ensure_dut_readin @pytest.mark.skip(reason="Bypass as this is not a production scenario") -def test_update_valid_index(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_update_valid_index(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] - output = duthost.shell('sonic-db-cli CONFIG_DB keys "PORT|"\\*')["stdout"] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + namespace_prefix = '' if asic_namespace is None else '-n ' + asic_namespace + output = duthost.shell('sonic-db-cli {} CONFIG_DB keys "PORT|"\\*'.format( + namespace_prefix))["stdout"] interfaces = {} # to be filled with two interfaces mapped to their indeces for line in output.split('\n'): if line.startswith('PORT|Ethernet'): interface = line[line.index('Ethernet'):].strip() - index = duthost.shell('sonic-db-cli CONFIG_DB hget "PORT|{}" index'.format(interface))["stdout"] + index = duthost.shell('sonic-db-cli {} CONFIG_DB hget "PORT|{}" index'.format( + namespace_prefix, interface))["stdout"] interfaces[interface] = index if len(interfaces) == 2: break @@ -306,12 +383,12 @@ def test_update_valid_index(duthosts, rand_one_dut_hostname, ensure_dut_readines json_patch = [ { "op": "replace", - "path": "/PORT/{}/index".format(list(interfaces.keys())[0]), + "path": "{}/PORT/{}/index".format(json_namespace, list(interfaces.keys())[0]), "value": "{}".format(list(interfaces.values())[1]) }, { "op": "replace", - "path": "/PORT/{}/index".format(list(interfaces.keys())[1]), + "path": "{}/PORT/{}/index".format(json_namespace, list(interfaces.keys())[1]), "value": 
"{}".format(list(interfaces.values())[0]) } ] @@ -326,14 +403,18 @@ def test_update_valid_index(duthosts, rand_one_dut_hostname, ensure_dut_readines delete_tmpfile(duthost, tmpfile) -def test_update_speed(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_update_speed(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] - speed_params = get_port_speeds_for_test(duthost) + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") + speed_params = get_port_speeds_for_test(duthost, port) for speed, is_valid in speed_params: json_patch = [ { "op": "replace", - "path": "/PORT/Ethernet0/speed", + "path": "{}/PORT/{}/speed".format(json_namespace, port), "value": "{}".format(speed) } ] @@ -343,9 +424,9 @@ def test_update_speed(duthosts, rand_one_dut_hostname, ensure_dut_readiness): try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) - if is_valid and is_valid_speed_state_db(duthost, speed): + if is_valid and is_valid_speed_state_db(duthost, speed, port, namespace=asic_namespace): expect_op_success(duthost, output) - current_status_speed = check_interface_status(duthost, "Speed").replace("G", "000") + current_status_speed = check_interface_status(duthost, "Speed", port).replace("G", "000") current_status_speed = current_status_speed.replace("M", "") pytest_assert(current_status_speed == speed, "Failed to properly configure interface speed to requested value {}".format(speed)) @@ -355,12 +436,16 @@ def test_update_speed(duthosts, rand_one_dut_hostname, ensure_dut_readiness): delete_tmpfile(duthost, tmpfile) -def test_update_description(duthosts, rand_one_dut_hostname, ensure_dut_readiness): +def test_update_description(duthosts, rand_one_dut_hostname, ensure_dut_readiness, rand_asic_namespace): duthost = 
duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") json_patch = [ { "op": "replace", - "path": "/PORT/Ethernet0/description", + "path": "{}/PORT/{}/description".format(json_namespace, port), "value": "Updated description" } ] @@ -376,12 +461,16 @@ def test_update_description(duthosts, rand_one_dut_hostname, ensure_dut_readines @pytest.mark.parametrize("admin_status", ["up", "down"]) -def test_eth_interface_admin_change(duthosts, rand_one_dut_hostname, admin_status): +def test_eth_interface_admin_change(duthosts, rand_one_dut_hostname, admin_status, rand_asic_namespace): duthost = duthosts[rand_one_dut_hostname] + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + asic_index = 0 if asic_id is None else asic_id + port = DEFAULT_INTERFACE.get(asic_index, "DefaultPort") json_patch = [ { "op": "add", - "path": "/PORT/Ethernet0/admin_status", + "path": "{}/PORT/{}/admin_status".format(json_namespace, port), "value": "{}".format(admin_status) } ] @@ -393,7 +482,7 @@ def test_eth_interface_admin_change(duthosts, rand_one_dut_hostname, admin_statu output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - pytest_assert(wait_until(10, 2, 0, lambda: check_interface_status(duthost, "Admin") == admin_status), + pytest_assert(wait_until(10, 2, 0, lambda: check_interface_status(duthost, "Admin", port) == admin_status), "Interface failed to update admin status to {}".format(admin_status)) finally: delete_tmpfile(duthost, tmpfile) diff --git a/tests/generic_config_updater/test_ip_bgp.py b/tests/generic_config_updater/test_ip_bgp.py new file mode 100644 index 00000000000..9253ba22e61 --- /dev/null +++ b/tests/generic_config_updater/test_ip_bgp.py @@ -0,0 
+1,214 @@ +import logging +import pytest +import ipaddress +import re + +from tests.common.helpers.assertions import pytest_assert +from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure +from tests.common.gu_utils import generate_tmpfile, delete_tmpfile +from tests.common.gu_utils import create_checkpoint, delete_checkpoint, rollback_or_reload + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology('t0', 't1', 't2', 'm0', 'mx'), +] + + +@pytest.fixture(autouse=True) +def ensure_dut_readiness(duthost): + """ + Setup/teardown fixture for each ipv6 test + rollback to check if it goes back to starting config + + Args: + duthost: DUT host object under test + """ + create_checkpoint(duthost) + + yield + + try: + logger.info("Rolled back to original checkpoint") + rollback_or_reload(duthost) + finally: + delete_checkpoint(duthost) + + +def get_ip_neighbor(duthost, namespace=None, ip_version=6): + """ + Returns ip BGP neighbor address, properties of BGP neighbor + + Args: + duthost: DUT host object + namespace: DUT asic namespace. asic0, asic1, None + ip_version: IP version. 
4, 6 + """ + + config_facts = duthost.config_facts(host=duthost.hostname, source="running", + verbose=False, namespace=namespace)['ansible_facts'] + + bgp_neighbors_data = config_facts['BGP_NEIGHBOR'] + for neighbor_address in list(bgp_neighbors_data.keys()): + if ipaddress.ip_address((neighbor_address.encode().decode())).version == ip_version: + return neighbor_address, bgp_neighbors_data[neighbor_address] + pytest_assert(False, "No existing ipv{} neighbor".format(ip_version)) + + +def check_neighbor_existence(duthost, neighbor_address, ip_version=6): + cmd = 'show ip bgp su' if ip_version == 4 else 'show ipv6 bgp su' + ip_bgp_su = duthost.shell(cmd)['stdout'] + return re.search(r'\b{}\b'.format(neighbor_address), ip_bgp_su) + + +def add_deleted_ip_neighbor(duthost, namespace=None, ip_version=6): + ip_neighbor_address, ip_neighbor_config = get_ip_neighbor(duthost, namespace, ip_version) + neighbor_exists = check_neighbor_existence(duthost, ip_neighbor_address, ip_version) + pytest_assert(neighbor_exists, "Nonexistent ipv{} BGP neighbor".format(ip_version)) + + duthost.shell('config bgp remove neighbor {}'.format(ip_neighbor_address)) + neighbor_exists = check_neighbor_existence(duthost, ip_neighbor_address, ip_version) + pytest_assert(not neighbor_exists, + "Failed to remove ipv{} BGP neighbor under test".format(ip_version)) + + json_namespace = '' if namespace is None else '/' + namespace + json_patch = [ + { + "op": "add", + "path": "{}/BGP_NEIGHBOR/{}".format(json_namespace, ip_neighbor_address), + "value": ip_neighbor_config + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_success(duthost, output) + neighbor_exists = check_neighbor_existence(duthost, ip_neighbor_address, ip_version) + pytest_assert(neighbor_exists, + "GCU failed to add back deleted ipv{} BGP neighbor".format(ip_version)) + finally: + delete_tmpfile(duthost, 
tmpfile) + + +def add_duplicate_ip_neighbor(duthost, namespace=None, ip_version=6): + ip_neighbor_address, ip_neighbor_config = get_ip_neighbor(duthost, namespace, ip_version) + + json_namespace = '' if namespace is None else '/' + namespace + json_patch = [ + { + "op": "add", + "path": "{}/BGP_NEIGHBOR/{}".format(json_namespace, ip_neighbor_address), + "value": ip_neighbor_config + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_success(duthost, output) + neighbor_exists = check_neighbor_existence(duthost, ip_neighbor_address, ip_version) + pytest_assert(neighbor_exists, + "Expected ipv{} BGP neighbor does not exist".format(ip_version)) + finally: + delete_tmpfile(duthost, tmpfile) + + +def invalid_ip_neighbor(duthost, namespace=None, ip_version=6): + xfailv6_input = [ + ("add", "FC00::xyz/126"), + ("remove", "FC00::01/126") + ] + xfailv4_input = [ + ("add", "10.0.0.256/31"), + ("remove", "10.0.0.0/31") + ] + + xfail_input = xfailv4_input if ip_version == 4 else xfailv6_input + json_namespace = '' if namespace is None else '/' + namespace + for op, dummy_neighbor_ip_address in xfail_input: + json_patch = [ + { + "op": "{}".format(op), + "path": "{}/BGP_NEIGHBOR/{}".format(json_namespace, dummy_neighbor_ip_address), + "value": {} + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_failure(output) + finally: + delete_tmpfile(duthost, tmpfile) + + +def ip_neighbor_admin_change(duthost, namespace=None, ip_version=6): + ip_neighbor_address, ip_neighbor_config = get_ip_neighbor(duthost, namespace, ip_version) + json_namespace = '' if namespace is None else '/' + namespace + json_patch = [ + { + "op": "add", + "path": "{}/BGP_NEIGHBOR/{}/admin_status".format(json_namespace, ip_neighbor_address), + "value": 
"up" + }, + { + "op": "replace", + "path": "{}/BGP_NEIGHBOR/{}/admin_status".format(json_namespace, ip_neighbor_address), + "value": "down" + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_success(duthost, output) + + ip_type = 'ip' if ip_version == 4 else 'ipv6' + cmds = "show {} bgp su | grep -w {}".format(ip_type, ip_neighbor_address) + output = duthost.shell(cmds) + pytest_assert(not output['rc'] and "Idle (Admin)" in output['stdout'], + "BGP Neighbor with addr {} failed to admin down.".format(ip_neighbor_address)) + finally: + delete_tmpfile(duthost, tmpfile) + + +def delete_ip_neighbor(duthost, namespace=None, ip_version=6): + ipv6_neighbor_address, ipv6_neighbor_config = get_ip_neighbor(duthost, namespace, ip_version) + json_namespace = '' if namespace is None else '/' + namespace + json_patch = [ + { + "op": "remove", + "path": "{}/BGP_NEIGHBOR/{}".format(json_namespace, ipv6_neighbor_address) + } + ] + + tmpfile = generate_tmpfile(duthost) + logger.info("tmpfile {}".format(tmpfile)) + + try: + output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) + expect_op_success(duthost, output) + neighbor_exists = check_neighbor_existence(duthost, ipv6_neighbor_address, ip_version) + pytest_assert(not neighbor_exists, + "Failed to remove ipv{} BGP neighbor under test".format(ip_version)) + finally: + delete_tmpfile(duthost, tmpfile) + + +@pytest.mark.parametrize("ip_version", [6, 4]) +def test_ip_suite(duthost, ensure_dut_readiness, rand_asic_namespace, ip_version): + asic_namespace, asic_id = rand_asic_namespace + add_deleted_ip_neighbor(duthost, asic_namespace, ip_version) + add_duplicate_ip_neighbor(duthost, asic_namespace, ip_version) + invalid_ip_neighbor(duthost, asic_namespace, ip_version) + ip_neighbor_admin_change(duthost, asic_namespace, ip_version) + delete_ip_neighbor(duthost, asic_namespace, ip_version) 
diff --git a/tests/generic_config_updater/test_kubernetes_config.py b/tests/generic_config_updater/test_kubernetes_config.py index 51d4234141d..9f371d939bc 100644 --- a/tests/generic_config_updater/test_kubernetes_config.py +++ b/tests/generic_config_updater/test_kubernetes_config.py @@ -16,6 +16,7 @@ # K8S config + K8SEMPTYCONFIG = [] K8SHALFCONFIG = [ '"KUBERNETES_MASTER": {\n' @@ -207,6 +208,10 @@ def setup_env(duthosts, rand_one_dut_hostname): delete_checkpoint(duthost) +def add_namespace_indentation(multiline_string, spaces=4): + return multiline_string.replace('\n', '\n' + ' ' * spaces) + + def get_k8s_runningconfig(duthost): """ Get k8s config from running config Sample output: K8SEMPTYCONFIG, K8SHALFCONFIG, K8SFULLCONFIG @@ -266,6 +271,13 @@ def k8s_config_update(duthost, test_data): tmpfile = generate_tmpfile(duthost) logger.info("tmpfile {}".format(tmpfile)) + if duthost.is_multi_asic: + for patch in json_patch: + if 'path' in patch: + patch['path'] = re.sub(r'^/', '/localhost/', patch['path']) + + target_config = [add_namespace_indentation(item) for item in target_config] + try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) diff --git a/tests/generic_config_updater/test_lo_interface.py b/tests/generic_config_updater/test_lo_interface.py index 2b04831e87f..0a4bc5d54db 100644 --- a/tests/generic_config_updater/test_lo_interface.py +++ b/tests/generic_config_updater/test_lo_interface.py @@ -79,6 +79,10 @@ def setup_env(duthosts, rand_one_dut_hostname, lo_intf): def cleanup_lo_interface_config(duthost, cfg_facts): lo_interfaces = cfg_facts.get('LOOPBACK_INTERFACE', {}) for lo_interface in lo_interfaces: + # bypass special Loopback4096 that is being used for t2 as this will be recognized as 'invalid name' + # for Loopback interfaces, as the recommended is LoopbackXXX + if lo_interface == 'Loopback4096': + continue del_loopback_interface = duthost.shell( "sudo config loopback del {}".format(lo_interface), module_ignore_errors=True) @@ 
-100,10 +104,11 @@ def lo_interface_tc1_add_init(duthost, lo_intf): """ lo_ip = "{}|{}".format(DEFAULT_LOOPBACK, lo_intf["ip"]) lo_ipv6 = "{}|{}".format(DEFAULT_LOOPBACK, lo_intf["ipv6"]) + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/LOOPBACK_INTERFACE", + "path": "{}/LOOPBACK_INTERFACE".format(json_namespace), "value": { DEFAULT_LOOPBACK: {}, lo_ip: {}, @@ -142,17 +147,36 @@ def lo_interface_tc1_add_duplicate(duthost, lo_intf): """ lo_ip = "{}|{}".format(DEFAULT_LOOPBACK, lo_intf["ip"]) lo_ipv6 = "{}|{}".format(DEFAULT_LOOPBACK, lo_intf["ipv6"]) + + path_lo_ip = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + lo_ip + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + lo_ip + ]) + ) + path_lo_ipv6 = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + lo_ipv6 + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + lo_ipv6 + ]) + ) json_patch = [ { "op": "add", - "path": create_path(["LOOPBACK_INTERFACE", - lo_ip]), + "path": path_lo_ip, "value": {} }, { "op": "add", - "path": create_path(["LOOPBACK_INTERFACE", - lo_ipv6]), + "path": path_lo_ipv6, "value": {} } ] @@ -191,17 +215,37 @@ def lo_interface_tc1_xfail(duthost, lo_intf): for op, name, dummy_lo_interface_v4, dummy_lo_interface_v6 in xfail_input: dummy_lo_interface_v4 = name + "|" + dummy_lo_interface_v4 dummy_lo_interface_v6 = name + "|" + dummy_lo_interface_v6 + + path_lo_interface_v4 = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + dummy_lo_interface_v4 + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + dummy_lo_interface_v4 + ]) + ) + path_lo_interface_v6 = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + dummy_lo_interface_v6 + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + dummy_lo_interface_v6 + ]) + ) + json_patch = [ { "op": "{}".format(op), - "path": create_path(["LOOPBACK_INTERFACE", - dummy_lo_interface_v4]), 
+ "path": path_lo_interface_v4, "value": {} }, { "op": "{}".format(op), - "path": create_path(["LOOPBACK_INTERFACE", - dummy_lo_interface_v6]), + "path": path_lo_interface_v6, "value": {} } ] @@ -234,27 +278,63 @@ def lo_interface_tc1_replace(duthost, lo_intf): lo_ipv6 = "{}|{}".format(DEFAULT_LOOPBACK, lo_intf["ipv6"]) replaced_ip = "{}|{}".format(DEFAULT_LOOPBACK, REPLACE_IP) replaced_ipv6 = "{}|{}".format(DEFAULT_LOOPBACK, REPLACE_IPV6) + + path_lo_ip = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + lo_ip + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + lo_ip + ]) + ) + path_lo_ipv6 = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + lo_ipv6 + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + lo_ipv6 + ]) + ) + path_replaced_ip = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + replaced_ip]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + replaced_ip + ]) + ) + path_replaced_ipv6 = ( + create_path([ + "localhost", + "LOOPBACK_INTERFACE", + replaced_ipv6 + ]) if duthost.is_multi_asic else create_path([ + "LOOPBACK_INTERFACE", + replaced_ipv6 + ]) + ) json_patch = [ { "op": "remove", - "path": create_path(["LOOPBACK_INTERFACE", - lo_ip]) + "path": path_lo_ip }, { "op": "remove", - "path": create_path(["LOOPBACK_INTERFACE", - lo_ipv6]) + "path": path_lo_ipv6 }, { "op": "add", - "path": create_path(["LOOPBACK_INTERFACE", - replaced_ip]), + "path": path_replaced_ip, "value": {} }, { "op": "add", - "path": create_path(["LOOPBACK_INTERFACE", - replaced_ipv6]), + "path": path_replaced_ipv6, "value": {} } ] @@ -277,10 +357,11 @@ def lo_interface_tc1_replace(duthost, lo_intf): def lo_interface_tc1_remove(duthost, lo_intf): """ Remove v4 and v6 loopback intf config """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/LOOPBACK_INTERFACE" + "path": "{}/LOOPBACK_INTERFACE".format(json_namespace) } ] @@ -348,6 +429,7 
@@ def setup_vrf_config(duthost, lo_intf): delete_tmpfile(duthost, tmpfile) +@pytest.mark.topology('t0', 'm0', 'mx', 't2') def test_lo_interface_tc1_suite(rand_selected_dut, cfg_facts, lo_intf): cleanup_lo_interface_config(rand_selected_dut, cfg_facts) lo_interface_tc1_add_init(rand_selected_dut, lo_intf) diff --git a/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py b/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py index 0d80da1ed65..29382a6250c 100644 --- a/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py +++ b/tests/generic_config_updater/test_mmu_dynamic_threshold_config_update.py @@ -40,7 +40,7 @@ def ensure_dut_readiness(duthost): delete_checkpoint(duthost) -def ensure_application_of_updated_config(duthost, value, pg_lossless_profiles): +def ensure_application_of_updated_config(duthost, value, pg_lossless_profiles, namespace=None): """ Ensures application of the JSON patch config update by verifying dynamic threshold value presence in DB @@ -48,31 +48,38 @@ def ensure_application_of_updated_config(duthost, value, pg_lossless_profiles): duthost: DUT host object value: expected value of dynamic threshold pg_lossless_profiles: all pg_lossless buffer profiles stored on the device + namespace: Namespace to run the command in. Ex. 
asic0, asic1, None """ def _confirm_value_in_appl_db_and_asic_db(): - + namespace_prefix = '' if namespace is None else '-n ' + namespace + redis_namespace_prefix = '' if namespace is None else 'sudo ip netns exec {}'.format(namespace) for pg_lossless_profile in pg_lossless_profiles: # Retrieve dynamic_th from APPL_DB - dynamic_th_in_appl_db = duthost.shell("sonic-db-cli APPL_DB hget BUFFER_PROFILE_" - "TABLE:{} dynamic_th".format(pg_lossless_profile))["stdout"] + dynamic_th_in_appl_db = duthost.shell( + "sonic-db-cli {} APPL_DB hget BUFFER_PROFILE_TABLE:{} dynamic_th".format( + namespace_prefix, + pg_lossless_profile + ) + )["stdout"] if dynamic_th_in_appl_db != value: return False # Retrieve dynamic_th from ASIC_DB - ingress_lossless_pool_oid = duthost.shell("sonic-db-cli COUNTERS_DB hget COUNTERS_BUFFER_POOL_NAME_MAP " - "ingress_lossless_pool")["stdout"] - buffer_pool_keys = duthost.shell("redis-cli -n 1 KEYS ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE:" - "oid*")["stdout_lines"] + ingress_lossless_pool_oid = duthost.shell("sonic-db-cli {} COUNTERS_DB hget COUNTERS_BUFFER_POOL_NAME_MAP " + "ingress_lossless_pool".format(namespace_prefix))["stdout"] + buffer_pool_keys = duthost.shell("{} redis-cli -n 1 KEYS ASIC_STATE:SAI_OBJECT_TYPE_BUFFER_PROFILE:" + "oid*".format(redis_namespace_prefix))["stdout_lines"] for buffer_pool in buffer_pool_keys: - pool_oid = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_" - "POOL_ID".format(buffer_pool))["stdout"] + pool_oid = duthost.shell("sonic-db-cli {} ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_" + "POOL_ID".format(namespace_prefix, buffer_pool))["stdout"] if pool_oid == ingress_lossless_pool_oid: - xoff_val = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_" - "XOFF_TH".format(buffer_pool))["stdout"] - dynamic_th_in_asic_db = duthost.shell("sonic-db-cli ASIC_DB hget {} SAI_BUFFER_PROFILE_" - "ATTR_SHARED_DYNAMIC_TH".format(buffer_pool))["stdout"] + xoff_val = duthost.shell("sonic-db-cli {} 
ASIC_DB hget {} SAI_BUFFER_PROFILE_ATTR_" + "XOFF_TH".format(namespace_prefix, buffer_pool))["stdout"] + dynamic_th_in_asic_db = duthost.shell("sonic-db-cli {} ASIC_DB hget {} SAI_BUFFER_PROFILE_" + "ATTR_SHARED_DYNAMIC_TH" + .format(namespace_prefix, buffer_pool))["stdout"] # Dynamic threshold values are a mismatch for pg_lossless profiles if dynamic_th_in_asic_db != value and len(xoff_val) > 0: return False @@ -80,19 +87,33 @@ def _confirm_value_in_appl_db_and_asic_db(): return True pytest_assert( - wait_until(READ_APPL_DB_TIMEOUT, READ_APPL_DB_INTERVAL, 0, _confirm_value_in_appl_db_and_asic_db), - "ASIC_DB or APPL_DB does not properly reflect new dynamic threshold expected value: {}".format(value) + wait_until( + READ_APPL_DB_TIMEOUT, + READ_APPL_DB_INTERVAL, + 0, + _confirm_value_in_appl_db_and_asic_db + ), + "ASIC_DB or APPL_DB for namespace {} does not properly reflect new dynamic threshold expected value: {}".format( + namespace, + value + ) ) -def get_pg_lossless_profiles(duthost): +def get_pg_lossless_profiles(duthost, namespace=None): """ Retrieves all pg_lossless buffer profiles that are present on the device. Ex. pg_lossless_100000_40m_profile Args: duthost: DUT host object + namespace: Namespace to run the command in. Ex. 
asic0, asic1, None """ - pg_lossless_profiles_str = duthost.shell("redis-cli -n 0 KEYS *BUFFER_PROFILE_TABLE:pg_lossless*")["stdout_lines"] + namespace_prefix = '' if namespace is None else '-n ' + namespace + pg_lossless_profiles_str = duthost.shell( + "sonic-db-cli {} APPL_DB KEYS *BUFFER_PROFILE_TABLE:pg_lossless*".format( + namespace_prefix + ) + )["stdout_lines"] pg_lossless_profiles_lst = [] for pg_lossless_profile_str in pg_lossless_profiles_str: @@ -108,8 +129,12 @@ def get_pg_lossless_profiles(duthost): @pytest.mark.parametrize("operation", ["replace"]) -def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation, skip_when_buffer_is_dynamic_model): - pg_lossless_profiles = get_pg_lossless_profiles(duthost) +def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation, + skip_when_buffer_is_dynamic_model, rand_asic_namespace): + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + + pg_lossless_profiles = get_pg_lossless_profiles(duthost, namespace=asic_namespace) pytest_require(pg_lossless_profiles, "DUT has no pg_lossless buffer profiles") new_dynamic_th = "2" json_patch = [] @@ -117,7 +142,7 @@ def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation, ski for pg_lossless_profile in pg_lossless_profiles: individual_patch = { "op": "{}".format(operation), - "path": "/BUFFER_PROFILE/{}/dynamic_th".format(pg_lossless_profile), + "path": "{}/BUFFER_PROFILE/{}/dynamic_th".format(json_namespace, pg_lossless_profile), "value": new_dynamic_th } json_patch.append(individual_patch) @@ -130,7 +155,7 @@ def test_dynamic_th_config_updates(duthost, ensure_dut_readiness, operation, ski try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - ensure_application_of_updated_config(duthost, new_dynamic_th, pg_lossless_profiles) + ensure_application_of_updated_config(duthost, new_dynamic_th, 
pg_lossless_profiles, namespace=asic_namespace) logger.info("Config successfully updated and verified.") finally: delete_tmpfile(duthost, tmpfile) diff --git a/tests/generic_config_updater/test_monitor_config.py b/tests/generic_config_updater/test_monitor_config.py index 860a5676558..ea7a5713338 100644 --- a/tests/generic_config_updater/test_monitor_config.py +++ b/tests/generic_config_updater/test_monitor_config.py @@ -21,37 +21,53 @@ @pytest.fixture(scope='module') -def get_valid_acl_ports(cfg_facts): +def get_valid_acl_ports(rand_selected_dut, rand_asic_namespace): """ Get valid acl ports that could be added to ACL table valid ports refers to the portchannels and ports not belongs portchannel """ - ports = set() - portchannel_members = set() - portchannel_member_dict = cfg_facts.get('PORTCHANNEL_MEMBER', {}) - for po, po_members in list(portchannel_member_dict.items()): - ports.add(po) - for po_member in po_members: - portchannel_members.add(po_member) - - port_dict = cfg_facts.get('PORT', {}) - for key in port_dict: - if key not in portchannel_members: - ports.add(key) - - return list(ports) - - -def bgp_monitor_config_cleanup(duthost): + asic_namespace, asic_id = rand_asic_namespace + + def _get_valid_acl_ports(): + ports = set() + portchannel_members = set() + + cfg_facts = rand_selected_dut.config_facts( + host=rand_selected_dut.hostname, + source="running", + verbose=False, + namespace=asic_namespace + )['ansible_facts'] + portchannel_member_dict = cfg_facts.get('PORTCHANNEL_MEMBER', {}) + for po, po_members in list(portchannel_member_dict.items()): + ports.add(po) + for po_member in po_members: + portchannel_members.add(po_member) + + port_dict = cfg_facts.get('PORT', {}) + for key in port_dict: + if key not in portchannel_members: + port_role = cfg_facts['PORT'][key].get('role') + if port_role and port_role != 'Ext': # ensure port is front-panel port + continue + ports.add(key) + return list(ports) + + return _get_valid_acl_ports() + + +def 
bgp_monitor_config_cleanup(duthost, namespace=None): """ Test requires no monitor config Clean up current monitor config if existed """ cmds = [] - cmds.append('sonic-db-cli CONFIG_DB del "ACL_TABLE|{}"'.format(MONITOR_CONFIG_ACL_TABLE)) - cmds.append('sonic-db-cli CONFIG_DB del "ACL_RULE|{}|{}"' - .format(MONITOR_CONFIG_ACL_TABLE, MONITOR_CONFIG_ACL_RULE)) - cmds.append('sonic-db-cli CONFIG_DB del "MIRROR_SESSION|{}"'.format(MONITOR_CONFIG_MIRROR_SESSION)) - cmds.append('sonic-db-cli CONFIG_DB del "POLICER|{}"'.format(MONITOR_CONFIG_POLICER)) + namespace_prefix = '' if namespace is None else '-n ' + namespace + cmds.append('sonic-db-cli {} CONFIG_DB del "ACL_TABLE|{}"'.format(namespace_prefix, MONITOR_CONFIG_ACL_TABLE)) + cmds.append('sonic-db-cli {} CONFIG_DB del "ACL_RULE|{}|{}"' + .format(namespace_prefix, MONITOR_CONFIG_ACL_TABLE, MONITOR_CONFIG_ACL_RULE)) + cmds.append('sonic-db-cli {} CONFIG_DB del "MIRROR_SESSION|{}"' + .format(namespace_prefix, MONITOR_CONFIG_MIRROR_SESSION)) + cmds.append('sonic-db-cli {} CONFIG_DB del "POLICER|{}"'.format(namespace_prefix, MONITOR_CONFIG_POLICER)) output = duthost.shell_cmds(cmds=cmds)['results'] for res in output: @@ -141,13 +157,17 @@ def verify_no_monitor_config(duthost): MONITOR_CONFIG_MIRROR_SESSION, MONITOR_CONFIG_POLICER]) -def monitor_config_add_config(duthost, get_valid_acl_ports): +def monitor_config_add_config(duthost, rand_asic_namespace, get_valid_acl_ports): """ Test to add everflow always on config """ + + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace + json_patch = [ { "op": "add", - "path": "/ACL_TABLE/{}".format(MONITOR_CONFIG_ACL_TABLE), + "path": "{}/ACL_TABLE/{}".format(json_namespace, MONITOR_CONFIG_ACL_TABLE), "value": { "policy_desc": "{}".format(MONITOR_CONFIG_ACL_TABLE), "ports": get_valid_acl_ports, @@ -157,7 +177,7 @@ def monitor_config_add_config(duthost, get_valid_acl_ports): }, { "op": "add", - "path": "/ACL_RULE", + 
"path": "{}/ACL_RULE".format(json_namespace), "value": { "{}|{}".format(MONITOR_CONFIG_ACL_TABLE, MONITOR_CONFIG_ACL_RULE): { "DSCP": "5", @@ -168,7 +188,7 @@ def monitor_config_add_config(duthost, get_valid_acl_ports): }, { "op": "add", - "path": "/MIRROR_SESSION", + "path": "{}/MIRROR_SESSION".format(json_namespace), "value": { "{}".format(MONITOR_CONFIG_MIRROR_SESSION): { "dscp": "5", @@ -182,7 +202,7 @@ def monitor_config_add_config(duthost, get_valid_acl_ports): }, { "op": "add", - "path": "/POLICER", + "path": "{}/POLICER".format(json_namespace), "value": { "{}".format(MONITOR_CONFIG_POLICER): { "meter_type": "bytes", @@ -207,15 +227,17 @@ def monitor_config_add_config(duthost, get_valid_acl_ports): delete_tmpfile(duthost, tmpfile) -def test_monitor_config_tc1_suite(rand_selected_dut, get_valid_acl_ports): +def test_monitor_config_tc1_suite(rand_selected_dut, rand_asic_namespace, get_valid_acl_ports): """ Test enable/disable EverflowAlwaysOn config """ + asic_namespace, asic_id = rand_asic_namespace + # Step 1: Create checkpoint at initial state where no monitor config exist - bgp_monitor_config_cleanup(rand_selected_dut) + bgp_monitor_config_cleanup(rand_selected_dut, namespace=asic_namespace) create_checkpoint(rand_selected_dut, MONITOR_CONFIG_INITIAL_CP) # Step 2: Add EverflowAlwaysOn config to rand_selected_dut - monitor_config_add_config(rand_selected_dut, get_valid_acl_ports) + monitor_config_add_config(rand_selected_dut, rand_asic_namespace, get_valid_acl_ports) # Step 3: Create checkpoint that containing desired EverflowAlwaysOn config create_checkpoint(rand_selected_dut, MONITOR_CONFIG_TEST_CP) diff --git a/tests/generic_config_updater/test_ntp.py b/tests/generic_config_updater/test_ntp.py index c8cd298d2d6..85a6002b5dd 100644 --- a/tests/generic_config_updater/test_ntp.py +++ b/tests/generic_config_updater/test_ntp.py @@ -102,10 +102,11 @@ def check_ntp_activestate(duthost): def ntp_server_tc1_add_config(duthost): """ Test to add NTP_SERVER config 
""" + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/NTP_SERVER", + "path": "{}/NTP_SERVER".format(json_namespace), "value": { NTP_SERVER_INIT: { "resolve_as": NTP_SERVER_INIT, @@ -119,7 +120,7 @@ def ntp_server_tc1_add_config(duthost): json_patch_bc = [ { "op": "add", - "path": "/NTP_SERVER", + "path": "{}/NTP_SERVER".format(json_namespace), "value": { NTP_SERVER_INIT: {} } @@ -161,11 +162,12 @@ def ntp_server_tc1_xfail(duthost): # ("add", "10.0.0.256"), # Add invalid server ("remove", NTP_SERVER_DUMMY), # Remove unexisted ntp server ] + json_namespace = '/localhost' if duthost.is_multi_asic else '' for op, ntp_server in xfail_input: json_patch = [ { "op": op, - "path": "/NTP_SERVER/{}".format(ntp_server), + "path": "{}/NTP_SERVER/{}".format(json_namespace, ntp_server), "value": {} } ] @@ -184,14 +186,15 @@ def ntp_server_tc1_xfail(duthost): def ntp_server_tc1_replace(duthost): """ Test to replace ntp server """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/NTP_SERVER/{}".format(NTP_SERVER_INIT) + "path": "{}/NTP_SERVER/{}".format(json_namespace, NTP_SERVER_INIT) }, { "op": "add", - "path": "/NTP_SERVER/{}".format(NTP_SERVER_DUMMY), + "path": "{}/NTP_SERVER/{}".format(json_namespace, NTP_SERVER_DUMMY), "value": { "resolve_as": NTP_SERVER_DUMMY, "association_type": "server", @@ -203,11 +206,11 @@ def ntp_server_tc1_replace(duthost): json_patch_bc = [ { "op": "remove", - "path": "/NTP_SERVER/{}".format(NTP_SERVER_INIT) + "path": "{}/NTP_SERVER/{}".format(json_namespace, NTP_SERVER_INIT) }, { "op": "add", - "path": "/NTP_SERVER/{}".format(NTP_SERVER_DUMMY), + "path": "{}/NTP_SERVER/{}".format(json_namespace, NTP_SERVER_DUMMY), "value": {} } ] @@ -239,10 +242,11 @@ def ntp_server_tc1_replace(duthost): def ntp_server_tc1_remove(duthost): """ Test to remove ntp server """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ 
{ "op": "remove", - "path": "/NTP_SERVER" + "path": "{}/NTP_SERVER".format(json_namespace) } ] diff --git a/tests/generic_config_updater/test_pfcwd_interval.py b/tests/generic_config_updater/test_pfcwd_interval.py index fd6d8d16ec6..9d1ab01c9a0 100644 --- a/tests/generic_config_updater/test_pfcwd_interval.py +++ b/tests/generic_config_updater/test_pfcwd_interval.py @@ -3,6 +3,7 @@ import json from tests.common.helpers.assertions import pytest_assert +from tests.common.helpers.constants import DEFAULT_ASIC_ID from tests.common.utilities import wait_until from tests.common.gu_utils import apply_patch, expect_op_success, expect_op_failure from tests.common.gu_utils import generate_tmpfile, delete_tmpfile @@ -42,16 +43,28 @@ def ensure_dut_readiness(duthost): @pytest.fixture(autouse=True, scope="module") def enable_default_pfcwd_configuration(duthost): res = duthost.shell('redis-dump -d 4 --pretty -k \"DEVICE_METADATA|localhost\"') + for asic_id in duthost.get_asic_ids(): + if asic_id: + res = duthost.asic_instance(asic_id).command('redis-dump -d 4 --pretty -k \"DEVICE_METADATA|localhost\"') meta_data = json.loads(res["stdout"]) pfc_status = meta_data["DEVICE_METADATA|localhost"]["value"].get("default_pfcwd_status", "") if pfc_status == 'disable': duthost.shell('redis-cli -n 4 hset \"DEVICE_METADATA|localhost\" default_pfcwd_status enable') + for asic_id in duthost.get_asic_ids(): + if asic_id: + duthost.asic_instance(asic_id).command( + 'redis-cli -n 4 hset \"DEVICE_METADATA|localhost\" default_pfcwd_status enable' + ) # Enable default pfcwd configuration start_pfcwd = duthost.shell('config pfcwd start_default') pytest_assert(not start_pfcwd['rc'], "Failed to start default pfcwd config") + for asic_id in duthost.get_asic_ids(): + if asic_id: + start_pfcwd = duthost.asic_instance(asic_id).command('config pfcwd start_default') + pytest_assert(not start_pfcwd['rc'], "Failed to start default pfcwd config") -def ensure_application_of_updated_config(duthost, value): +def 
ensure_application_of_updated_config(duthost, value, namespace=None): """ Ensures application of the JSON patch config update by verifying field value presence in FLEX COUNTER DB @@ -60,8 +73,9 @@ def ensure_application_of_updated_config(duthost, value): value: expected value of POLL_INTERVAL """ def _confirm_value_in_flex_counter_db(): - poll_interval = duthost.shell( - 'sonic-db-cli PFC_WD_DB hget FLEX_COUNTER_GROUP_TABLE:PFC_WD POLL_INTERVAL')["stdout"] + namespace_prefix = '' if namespace is None else '-n ' + namespace + cmd = 'sonic-db-cli {} PFC_WD_DB hget FLEX_COUNTER_GROUP_TABLE:PFC_WD POLL_INTERVAL'.format(namespace_prefix) + poll_interval = duthost.shell(cmd)["stdout"] return value == poll_interval pytest_assert( @@ -87,7 +101,11 @@ def prepare_pfcwd_interval_config(duthost, value): else: cmd = r"sonic-db-cli CONFIG_DB del \PFC_WD\GLOBAL\POLL_INTERVAL" - duthost.shell(cmd) + for asic_id in duthost.get_asic_ids(): + if not asic_id: + duthost.shell(cmd) + else: + duthost.asic_instance(asic_id).command(cmd) def get_detection_restoration_times(duthost): @@ -99,7 +117,12 @@ def get_detection_restoration_times(duthost): duthost: DUT host object """ - duthost.shell('config pfcwd start --action drop all 400 --restoration-time 400', module_ignore_errors=True) + cmd = 'config pfcwd start --action drop all 400 --restoration-time 400' + for asic_id in duthost.get_asic_ids(): + if not asic_id: + duthost.shell(cmd, module_ignore_errors=True) + else: + duthost.asic_instance(asic_id).command(cmd) # module_ignore_errors=True per asic? 
pfcwd_config = duthost.shell("show pfcwd config") pytest_assert(not pfcwd_config['rc'], "Unable to read pfcwd config") @@ -107,12 +130,22 @@ def get_detection_restoration_times(duthost): if line.startswith('Ethernet'): interface = line.split()[0] # Since line starts with Ethernet, we can safely use 0 index - cmd = "sonic-db-cli CONFIG_DB hget \"PFC_WD|{}\" \"detection_time\" ".format(interface) + asic_index = DEFAULT_ASIC_ID + if duthost.is_multi_asic: + asic_index = duthost.get_port_asic_instance(interface).asic_index + namespace = duthost.get_namespace_from_asic_id(asic_index) + namespace_prefix = '-n ' + namespace if namespace else '' + + # get info per asic interface in case multi-asic + cmd = "sonic-db-cli {} CONFIG_DB hget \"PFC_WD|{}\" \"detection_time\" ".format(namespace_prefix, interface) output = duthost.shell(cmd, module_ignore_errors=True) pytest_assert(not output['rc'], "Unable to read detection time") detection_time = output["stdout"] - cmd = "sonic-db-cli CONFIG_DB hget \"PFC_WD|{}\" \"restoration_time\" ".format(interface) + cmd = "sonic-db-cli {} CONFIG_DB hget \"PFC_WD|{}\" \"restoration_time\" ".format( + namespace_prefix, + interface + ) output = duthost.shell(cmd, module_ignore_errors=True) pytest_assert(not output['rc'], "Unable to read restoration time") restoration_time = output["stdout"] @@ -143,7 +176,9 @@ def get_new_interval(duthost, is_valid): @pytest.mark.parametrize("field_pre_status", ["existing", "nonexistent"]) @pytest.mark.parametrize("is_valid_config_update", [True, False]) def test_pfcwd_interval_config_updates(duthost, ensure_dut_readiness, oper, - field_pre_status, is_valid_config_update): + field_pre_status, is_valid_config_update, rand_asic_namespace): + + asic_namespace, asic_id = rand_asic_namespace new_interval = get_new_interval(duthost, is_valid_config_update) operation_to_new_value_map = {"add": "{}".format(new_interval), "replace": "{}".format(new_interval)} @@ -158,10 +193,11 @@ def 
test_pfcwd_interval_config_updates(duthost, ensure_dut_readiness, oper, value = operation_to_new_value_map[oper] logger.info("value to be added to json patch: {}".format(value)) + json_namespace = '' if asic_namespace is None else '/' + asic_namespace json_patch = [ { "op": "{}".format(oper), - "path": "/PFC_WD/GLOBAL/POLL_INTERVAL", + "path": "{}/PFC_WD/GLOBAL/POLL_INTERVAL".format(json_namespace), "value": "{}".format(value) }] @@ -170,7 +206,7 @@ def test_pfcwd_interval_config_updates(duthost, ensure_dut_readiness, oper, if is_valid_config_update and is_valid_platform_and_version(duthost, "PFC_WD", "PFCWD enable/disable", oper): expect_op_success(duthost, output) - ensure_application_of_updated_config(duthost, value) + ensure_application_of_updated_config(duthost, value, asic_namespace) else: expect_op_failure(output) finally: diff --git a/tests/generic_config_updater/test_pfcwd_status.py b/tests/generic_config_updater/test_pfcwd_status.py index 76f5828d6b5..9cda9d9e387 100644 --- a/tests/generic_config_updater/test_pfcwd_status.py +++ b/tests/generic_config_updater/test_pfcwd_status.py @@ -59,14 +59,24 @@ def set_default_pfcwd_config(duthost): meta_data = json.loads(res["stdout"]) pfc_status = meta_data["DEVICE_METADATA|localhost"]["value"].get("default_pfcwd_status", "") if pfc_status == 'disable': - duthost.shell('sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" default_pfcwd_status enable') + cmd = 'sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" default_pfcwd_status enable' + for asic_id in duthost.get_asic_ids(): + if asic_id: + duthost.asic_instance(asic_id).command(cmd) + else: + duthost.shell(cmd) yield # Restore default config duthost.shell('config pfcwd stop') if pfc_status == 'disable': - duthost.shell('sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" default_pfcwd_status disable') + cmd = 'sonic-db-cli CONFIG_DB hset \"DEVICE_METADATA|localhost\" default_pfcwd_status disable' + for asic_id in duthost.get_asic_ids(): + if 
asic_id: + duthost.asic_instance(asic_id).command(cmd) + else: + duthost.shell(cmd) else: start_pfcwd = duthost.shell('config pfcwd start_default') pytest_assert(not start_pfcwd['rc'], "Failed to start default pfcwd config") @@ -105,7 +115,12 @@ def stop_pfcwd(duthost): Args: duthost: DUT host object """ - duthost.shell('config pfcwd stop') + cmd = 'sudo config pfcwd stop' + for asic_id in duthost.get_asic_ids(): + if asic_id: + duthost.asic_instance(asic_id).command(cmd) + else: + duthost.shell(cmd) yield @@ -117,12 +132,17 @@ def start_pfcwd(duthost): Args: duthost: DUT host object """ - duthost.shell('config pfcwd start_default') + cmd = 'sudo config pfcwd start_default' + for asic_id in duthost.get_asic_ids(): + if asic_id: + duthost.asic_instance(asic_id).command(cmd) + else: + duthost.shell(cmd) yield @pytest.fixture -def extract_pfcwd_config(duthost, start_pfcwd): +def extract_pfcwd_config(duthost, start_pfcwd, rand_asic_namespace): """ Extract pfcwd info from running config @@ -133,7 +153,9 @@ def extract_pfcwd_config(duthost, start_pfcwd): pfcwd_config: dict of dicts with interface as the 1st level key and 'action', 'detect_time', 'restore_time' as the 2nd level keys """ - output = duthost.command('show pfcwd config') + asic_namespace, asic_id = rand_asic_namespace + cmd = 'show pfcwd config -n {}'.format(asic_namespace) if asic_namespace is not None else 'show pfcwd config' + output = duthost.command(cmd) pytest_assert('Ethernet' in output['stdout'], 'No ports found in the pfcwd config') pfcwd_config = defaultdict() @@ -147,46 +169,56 @@ def extract_pfcwd_config(duthost, start_pfcwd): yield pfcwd_config -def get_flex_db_count(duthost): +def get_flex_db_count(duthost, namespace=None): """ Get the count of the number of pfcwd entries seen in flex db For every port, there will be 3 entries - 1 for the port, 1 for queue 3 and 1 for queue 4 Args: duthost: DUT host object + namespace: namespace to be used for the command Returns: Number of PFCWD related flex db 
entries """ - db_entries = duthost.shell('sonic-db-cli FLEX_COUNTER_DB keys *FLEX_COUNTER_TABLE:PFC_WD*')["stdout"] + + namespace_prefix = '-n ' + namespace if namespace else '' + cmd = 'sonic-db-cli {} FLEX_COUNTER_DB keys *FLEX_COUNTER_TABLE:PFC_WD*'.format(namespace_prefix) + db_entries = duthost.shell(cmd)["stdout"] + if db_entries == '': return 0 else: return len(db_entries.split('\n')) -def check_config_update(duthost, expected_count): +def check_config_update(duthost, expected_count, namespace=None): """ Ensures application of the JSON patch config update Args: duthost: DUT host object expected_count: number of pfcwd entries expected in the updated config + namespace: namespace to be used for the command """ - def _confirm_value_in_flex_db(duthost, expected_count): - pfcwd_entries_count = get_flex_db_count(duthost) + def _confirm_value_in_flex_db(): + pfcwd_entries_count = get_flex_db_count(duthost, namespace) logger.info("Actual number of entries: {}".format(pfcwd_entries_count)) return pfcwd_entries_count == expected_count logger.info("Validating in FLEX COUNTER DB...") pytest_assert( - wait_until(READ_FLEXDB_TIMEOUT, READ_FLEXDB_INTERVAL, 0, _confirm_value_in_flex_db, duthost, expected_count), - "FLEX DB does not properly reflect Pfcwd status: Expected number of entries {}" - .format(expected_count) + wait_until( + READ_FLEXDB_TIMEOUT, + READ_FLEXDB_INTERVAL, + 0, + _confirm_value_in_flex_db + ), + "FLEX DB does not properly reflect Pfcwd status: Expected number of entries {}".format(expected_count) ) @pytest.mark.parametrize('port', ['single', 'all']) -def test_stop_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, port): +def test_stop_pfcwd(duthost, rand_asic_namespace, extract_pfcwd_config, ensure_dut_readiness, port): """ Tests GCU config for pfcwd stop scenario 1. Covers the case for stopping pfcwd on single port and all ports @@ -194,6 +226,7 @@ def test_stop_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, port): 3. 
Validates the number of PFC_WD related entries in FLEX DB is as expected 4. Validates that orchagent is running fine pre and post test """ + asic_namespace, asic_id = rand_asic_namespace pfcwd_config = extract_pfcwd_config initial_count = len(pfcwd_config) * FLEXDB_COUNTERS_PER_PORT @@ -204,15 +237,22 @@ def test_stop_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, port): json_patch = list() exp_str = 'Ethernet' for interface in pfcwd_config: + + asic_index = None + json_namespace = '' + if duthost.is_multi_asic: + asic_index = duthost.get_port_asic_instance(interface).asic_index + ns = duthost.get_namespace_from_asic_id(asic_index) + json_namespace = '/' + ns + json_patch.extend([ { 'op': 'remove', - 'path': '/PFC_WD/{}'.format(interface) + 'path': '{}/PFC_WD/{}'.format(json_namespace, interface) }]) if port == 'single': exp_str = interface break - try: tmpfile = generate_tmpfile(duthost) output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) @@ -221,13 +261,13 @@ def test_stop_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, port): pytest_assert(not pfcwd_updated_config['rc'], "Unable to read updated pfcwd config") pytest_assert(exp_str not in pfcwd_updated_config['stdout'].split(), "pfcwd unexpectedly still running") - check_config_update(duthost, expected_count) + check_config_update(duthost, expected_count, asic_namespace) finally: delete_tmpfile(duthost, tmpfile) @pytest.mark.parametrize('port', ['single', 'all']) -def test_start_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, stop_pfcwd, port): +def test_start_pfcwd(duthost, rand_asic_namespace, extract_pfcwd_config, ensure_dut_readiness, stop_pfcwd, port): """ Tests GCU config for pfcwd start scenario 1. Covers the case for starting pfcwd on single port and all ports @@ -235,6 +275,7 @@ def test_start_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, stop_p 3. Validates the number of PFC_WD related entries in FLEX DB is as expected 4. 
Validates that orchagent is running fine pre and post test """ + asic_namespace, asic_id = rand_asic_namespace pfcwd_config = extract_pfcwd_config if port == 'single': @@ -245,10 +286,17 @@ def test_start_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, stop_p exp_str = 'Ethernet' op = 'add' for interface, value in pfcwd_config.items(): + + asic_index = None + json_namespace = '' + if duthost.is_multi_asic: + asic_index = duthost.get_port_asic_instance(interface).asic_index + ns = duthost.get_namespace_from_asic_id(asic_index) + json_namespace = '/' + ns json_patch.extend([ { 'op': op, - 'path': '/PFC_WD/{}'.format(interface), + 'path': '{}/PFC_WD/{}'.format(json_namespace, interface), 'value': {'action': value['action'], 'detection_time': value['detect_time'], 'restoration_time': value['restore_time']}}]) @@ -265,7 +313,7 @@ def test_start_pfcwd(duthost, extract_pfcwd_config, ensure_dut_readiness, stop_p pytest_assert(not pfcwd_updated_config['rc'], "Unable to read updated pfcwd config") pytest_assert(exp_str in pfcwd_updated_config['stdout'], "pfcwd not started - unexpected") - check_config_update(duthost, expected_count) + check_config_update(duthost, expected_count, asic_namespace) else: expect_op_failure(output) finally: diff --git a/tests/generic_config_updater/test_pg_headroom_update.py b/tests/generic_config_updater/test_pg_headroom_update.py index 16a0b2e6f0c..74eb33a5cb4 100644 --- a/tests/generic_config_updater/test_pg_headroom_update.py +++ b/tests/generic_config_updater/test_pg_headroom_update.py @@ -42,7 +42,7 @@ def ensure_dut_readiness(duthost): delete_checkpoint(duthost) -def ensure_application_of_updated_config(duthost, xoff, values): +def ensure_application_of_updated_config(duthost, xoff, values, namespace=None): """ Ensures application of the JSON patch config update @@ -52,17 +52,20 @@ def ensure_application_of_updated_config(duthost, xoff, values): """ def _confirm_value_in_app_and_asic_db(): + namespace_prefix = '' if namespace is 
None else '-n ' + namespace for profile in xoff: - profile_data = duthost.shell('sonic-db-cli APPL_DB hgetall "BUFFER_PROFILE_TABLE:{}"' - .format(profile))["stdout"] + profile_data = duthost.shell('sonic-db-cli {} APPL_DB hgetall "BUFFER_PROFILE_TABLE:{}"' + .format(namespace_prefix, profile))["stdout"] profile_data = ast.literal_eval(profile_data) if profile_data["xoff"] != xoff[profile]: return False count = 0 - table_name = duthost.shell('sonic-db-cli ASIC_DB keys *BUFFER_PROFILE*')["stdout_lines"] + table_name = duthost.shell('sonic-db-cli {} ASIC_DB keys *BUFFER_PROFILE*' + .format(namespace_prefix))["stdout_lines"] for table in table_name: - profile_data = duthost.shell('sonic-db-cli ASIC_DB hgetall "{}"'.format(table))["stdout"] + profile_data = duthost.shell('sonic-db-cli {} ASIC_DB hgetall "{}"' + .format(namespace_prefix, table))["stdout"] profile_data = ast.literal_eval(profile_data) if "SAI_BUFFER_PROFILE_ATTR_XOFF_TH" in profile_data: count += 1 @@ -70,7 +73,7 @@ def _confirm_value_in_app_and_asic_db(): return False return count == len(values) - logger.info("Validating fields in APPL DB and ASIC DB...") + logger.info("Validating fields in APPL DB and ASIC DB for namespace {}...".format(namespace)) pytest_assert( wait_until(READ_ASICDB_TIMEOUT, READ_ASICDB_INTERVAL, 0, _confirm_value_in_app_and_asic_db), "APPL DB or ASIC DB does not properly reflect newly configured value(s) for xoff" @@ -78,7 +81,9 @@ def _confirm_value_in_app_and_asic_db(): @pytest.mark.parametrize("operation", ["replace"]) -def test_pg_headroom_update(duthost, ensure_dut_readiness, operation, skip_when_buffer_is_dynamic_model): +def test_pg_headroom_update(duthost, ensure_dut_readiness, operation, + skip_when_buffer_is_dynamic_model, rand_asic_namespace): + asic_namespace, asic_id = rand_asic_namespace asic_type = get_asic_name(duthost) pytest_require("td2" not in asic_type, "PG headroom should be skipped on TD2") tmpfile = generate_tmpfile(duthost) @@ -86,10 +91,13 @@ def 
test_pg_headroom_update(duthost, ensure_dut_readiness, operation, skip_when_ json_patch = list() values = list() xoff = dict() - lossless_profiles = duthost.shell('sonic-db-cli CONFIG_DB keys *BUFFER_PROFILE\\|pg_lossless*')['stdout_lines'] + namespace_prefix = '' if asic_namespace is None else '-n ' + asic_namespace + lossless_profiles = duthost.shell('sonic-db-cli {} CONFIG_DB keys *BUFFER_PROFILE\\|pg_lossless*' + .format(namespace_prefix))['stdout_lines'] + json_namespace = '' if asic_namespace is None else '/' + asic_namespace for profile in lossless_profiles: profile_name = profile.split('|')[-1] - value = duthost.shell('sonic-db-cli CONFIG_DB hget "{}" "xoff"'.format(profile))['stdout'] + value = duthost.shell('sonic-db-cli {} CONFIG_DB hget "{}" "xoff"'.format(namespace_prefix, profile))['stdout'] value = int(value) value -= 1000 xoff[profile_name] = str(value) @@ -100,14 +108,14 @@ def test_pg_headroom_update(duthost, ensure_dut_readiness, operation, skip_when_ json_patch.append( {"op": "{}".format(operation), - "path": "/BUFFER_PROFILE/{}/xoff".format(profile_name), + "path": "{}/BUFFER_PROFILE/{}/xoff".format(json_namespace, profile_name), "value": "{}".format(value)}) try: output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) if is_valid_platform_and_version(duthost, "BUFFER_PROFILE", "PG headroom modification", operation): expect_op_success(duthost, output) - ensure_application_of_updated_config(duthost, xoff, values) + ensure_application_of_updated_config(duthost, xoff, values, asic_namespace) else: expect_op_failure(output) finally: diff --git a/tests/generic_config_updater/test_portchannel_interface.py b/tests/generic_config_updater/test_portchannel_interface.py index f7f8e0b29c4..9bcfa7d5f78 100644 --- a/tests/generic_config_updater/test_portchannel_interface.py +++ b/tests/generic_config_updater/test_portchannel_interface.py @@ -26,7 +26,7 @@ # } pytestmark = [ - pytest.mark.topology('t0', 'm0'), + pytest.mark.topology('t0', 'm0', 
't2'), ] logger = logging.getLogger(__name__) @@ -51,6 +51,14 @@ def _is_ipv4_address(ip_addr): return portchannel_table +@pytest.fixture(scope="module") +def rand_portchannel_name(cfg_facts): + portchannel_dict = cfg_facts.get('PORTCHANNEL', {}) + pytest_require(portchannel_dict, "Portchannel table is empty") + for portchannel_key in portchannel_dict: + return portchannel_key + + def check_portchannel_table(duthost, portchannel_table): """This is to check if portchannel interfaces are the same as t0 initial setup """ @@ -80,22 +88,42 @@ def setup_env(duthosts, rand_one_dut_hostname, portchannel_table): delete_checkpoint(duthost) -def portchannel_interface_tc1_add_duplicate(duthost, portchannel_table): +def portchannel_interface_tc1_add_duplicate(duthost, portchannel_table, rand_asic_namespace, rand_portchannel_name): """ Test adding duplicate portchannel interface """ - dup_ip = portchannel_table["PortChannel101"]["ip"] - dup_ipv6 = portchannel_table["PortChannel101"]["ipv6"] + asic_namespace, asic_id = rand_asic_namespace + dup_ip = portchannel_table[rand_portchannel_name]["ip"] + dup_ipv6 = portchannel_table[rand_portchannel_name]["ipv6"] + + path_dup_ip = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, dup_ip) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, dup_ip) + ]) + ) + path_dup_ipv6 = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, dup_ipv6.upper()) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, dup_ipv6.upper()) + ]) + ) json_patch = [ { "op": "add", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(dup_ip)]), + "path": path_dup_ip, "value": {} }, { "op": "add", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(dup_ipv6.upper())]), + "path": 
path_dup_ipv6, "value": {} } ] @@ -107,13 +135,13 @@ def portchannel_interface_tc1_add_duplicate(duthost, portchannel_table): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - check_show_ip_intf(duthost, "PortChannel101", [dup_ip], [], is_ipv4=True) - check_show_ip_intf(duthost, "PortChannel101", [dup_ipv6], [], is_ipv4=False) + check_show_ip_intf(duthost, rand_portchannel_name, [dup_ip], [], is_ipv4=True) + check_show_ip_intf(duthost, rand_portchannel_name, [dup_ipv6], [], is_ipv4=False) finally: delete_tmpfile(duthost, tmpfile) -def portchannel_interface_tc1_xfail(duthost): +def portchannel_interface_tc1_xfail(duthost, rand_asic_namespace, rand_portchannel_name): """ Test invalid ip address and remove unexited interface ("add", "PortChannel101", "10.0.0.256/31", "FC00::71/126"), ADD Invalid IPv4 address @@ -121,25 +149,47 @@ def portchannel_interface_tc1_xfail(duthost): ("remove", "PortChannel101", "10.0.0.57/31", "FC00::71/126"), REMOVE Unexist IPv4 address ("remove", "PortChannel101", "10.0.0.56/31", "FC00::72/126"), REMOVE Unexist IPv6 address """ + asic_namespace, asic_id = rand_asic_namespace xfail_input = [ - ("add", "PortChannel101", "10.0.0.256/31", "FC00::71/126"), - ("add", "PortChannel101", "10.0.0.56/31", "FC00::xyz/126"), - ("remove", "PortChannel101", "10.0.0.57/31", "FC00::71/126"), - ("remove", "PortChannel101", "10.0.0.56/31", "FC00::72/126") + ("add", rand_portchannel_name, "10.0.0.256/31", "FC00::71/126"), + ("add", rand_portchannel_name, "10.0.0.56/31", "FC00::xyz/126"), + ("remove", rand_portchannel_name, "10.0.0.57/31", "FC00::71/126"), + ("remove", rand_portchannel_name, "10.0.0.56/31", "FC00::72/126") ] for op, po_name, ip, ipv6 in xfail_input: po_ip = po_name + "|" + ip po_ipv6 = po_name + "|" + ipv6 + + path_po_ip = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + po_ip + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + po_ip + ]) + 
) + path_po_ipv6 = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + po_ipv6 + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + po_ipv6 + ]) + ) json_patch = [ { "op": "{}".format(op), - "path": create_path(["PORTCHANNEL_INTERFACE", po_ip]), + "path": path_po_ip, "value": {} }, { "op": "{}".format(op), - "path": create_path(["PORTCHANNEL_INTERFACE", po_ipv6]), + "path": path_po_ipv6, "value": {} } ] @@ -154,34 +204,70 @@ def portchannel_interface_tc1_xfail(duthost): delete_tmpfile(duthost, tmpfile) -def portchannel_interface_tc1_add_and_rm(duthost, portchannel_table): +def portchannel_interface_tc1_add_and_rm(duthost, portchannel_table, rand_asic_namespace, rand_portchannel_name): """ Test portchannel interface replace ip address """ - org_ip = portchannel_table["PortChannel101"]["ip"] - org_ipv6 = portchannel_table["PortChannel101"]["ipv6"] + asic_namespace, asic_id = rand_asic_namespace + org_ip = portchannel_table[rand_portchannel_name]["ip"] + org_ipv6 = portchannel_table[rand_portchannel_name]["ipv6"] rep_ip = "10.0.0.156/31" rep_ipv6 = "fc00::171/126" + path_org_ip = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, org_ip) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", "{}|{}".format(rand_portchannel_name, org_ip) + ]) + ) + path_org_ipv6 = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, org_ipv6.upper()) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, org_ipv6.upper()) + ]) + ) + path_rep_ip = ( + create_path([ + asic_namespace, + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, rep_ip) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, rep_ip) + ]) + ) + path_rep_ipv6 = ( + create_path([ + asic_namespace, + 
"PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, rep_ipv6.upper()) + ]) if asic_namespace is not None else create_path([ + "PORTCHANNEL_INTERFACE", + "{}|{}".format(rand_portchannel_name, rep_ipv6.upper()) + ]) + ) json_patch = [ { "op": "remove", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(org_ip)]) + "path": path_org_ip }, { "op": "remove", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(org_ipv6.upper())]) + "path": path_org_ipv6 }, { "op": "add", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(rep_ip)]), + "path": path_rep_ip, "value": {} }, { "op": "add", - "path": create_path(["PORTCHANNEL_INTERFACE", - "PortChannel101|{}".format(rep_ipv6)]), + "path": path_rep_ipv6, "value": {} } ] @@ -193,16 +279,20 @@ def portchannel_interface_tc1_add_and_rm(duthost, portchannel_table): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - check_show_ip_intf(duthost, "PortChannel101", [rep_ip], [org_ip], is_ipv4=True) - check_show_ip_intf(duthost, "PortChannel101", [rep_ipv6], [org_ipv6], is_ipv4=False) + check_show_ip_intf(duthost, rand_portchannel_name, [rep_ip], [org_ip], is_ipv4=True) + check_show_ip_intf(duthost, rand_portchannel_name, [rep_ipv6], [org_ipv6], is_ipv4=False) finally: delete_tmpfile(duthost, tmpfile) -def test_portchannel_interface_tc1_suite(rand_selected_dut, portchannel_table): - portchannel_interface_tc1_add_duplicate(rand_selected_dut, portchannel_table) - portchannel_interface_tc1_xfail(rand_selected_dut) - portchannel_interface_tc1_add_and_rm(rand_selected_dut, portchannel_table) +def test_portchannel_interface_tc1_suite(rand_selected_dut, portchannel_table, + rand_asic_namespace, rand_portchannel_name): + portchannel_interface_tc1_add_duplicate(rand_selected_dut, portchannel_table, + rand_asic_namespace, rand_portchannel_name) + portchannel_interface_tc1_xfail(rand_selected_dut, + 
rand_asic_namespace, rand_portchannel_name) + portchannel_interface_tc1_add_and_rm(rand_selected_dut, portchannel_table, + rand_asic_namespace, rand_portchannel_name) def verify_po_running(duthost, portchannel_table): @@ -245,20 +335,21 @@ def verify_attr_change(duthost, po_name, attr, value): pytest_assert(output['stdout'].startswith(value), "{} {} change failed".format(po_name, attr)) -def portchannel_interface_tc2_replace(duthost): +def portchannel_interface_tc2_replace(duthost, rand_asic_namespace, rand_portchannel_name): """Test PortChannelXXXX attribute change """ + asic_namespace, asic_id = rand_asic_namespace attributes = [ ("mtu", "3324"), ("min_links", "2"), ("admin_status", "down") ] - + json_namespace = '' if asic_namespace is None else '/' + asic_namespace json_patch = [] for attr, value in attributes: patch = { "op": "replace", - "path": "/PORTCHANNEL/PortChannel101/{}".format(attr), + "path": "{}/PORTCHANNEL/{}/{}".format(json_namespace, rand_portchannel_name, attr), "value": value } json_patch.append(patch) @@ -270,21 +361,23 @@ def portchannel_interface_tc2_replace(duthost): output = apply_patch(duthost, json_data=json_patch, dest_file=tmpfile) expect_op_success(duthost, output) - verify_po_running(duthost, ["PortChannel101"]) + verify_po_running(duthost, [rand_portchannel_name]) for attr, value in attributes: - verify_attr_change(duthost, "PortChannel101", attr, value) + verify_attr_change(duthost, rand_portchannel_name, attr, value) finally: delete_tmpfile(duthost, tmpfile) -def portchannel_interface_tc2_incremental(duthost): +def portchannel_interface_tc2_incremental(duthost, rand_asic_namespace, rand_portchannel_name): """Test PortChannelXXXX incremental change """ + asic_namespace, asic_id = rand_asic_namespace + json_namespace = '' if asic_namespace is None else '/' + asic_namespace json_patch = [ { "op": "add", - "path": "/PORTCHANNEL/PortChannel101/description", - "value": "Description for PortChannel101" + "path": 
"{}/PORTCHANNEL/{}/description".format(json_namespace, rand_portchannel_name), + "value": "Description for {}".format(rand_portchannel_name) } ] @@ -298,6 +391,6 @@ def portchannel_interface_tc2_incremental(duthost): delete_tmpfile(duthost, tmpfile) -def test_portchannel_interface_tc2_attributes(rand_selected_dut): - portchannel_interface_tc2_replace(rand_selected_dut) - portchannel_interface_tc2_incremental(rand_selected_dut) +def test_portchannel_interface_tc2_attributes(rand_selected_dut, rand_asic_namespace, rand_portchannel_name): + portchannel_interface_tc2_replace(rand_selected_dut, rand_asic_namespace, rand_portchannel_name) + portchannel_interface_tc2_incremental(rand_selected_dut, rand_asic_namespace, rand_portchannel_name) diff --git a/tests/generic_config_updater/test_syslog.py b/tests/generic_config_updater/test_syslog.py index 47bbecb484a..0c964eeddff 100644 --- a/tests/generic_config_updater/test_syslog.py +++ b/tests/generic_config_updater/test_syslog.py @@ -115,10 +115,11 @@ def syslog_server_tc1_add_init(duthost): [10.0.0.5] [cc98:2008::1] """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/SYSLOG_SERVER", + "path": "{}/SYSLOG_SERVER".format(json_namespace), "value": { SYSLOG_DUMMY_IPV4_SERVER: {}, SYSLOG_DUMMY_IPV6_SERVER: {} @@ -150,15 +151,16 @@ def syslog_server_tc1_add_duplicate(duthost): [10.0.0.5] [cc98:2008::1] """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "add", - "path": "/SYSLOG_SERVER/{}".format(SYSLOG_DUMMY_IPV4_SERVER), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, SYSLOG_DUMMY_IPV4_SERVER), "value": {} }, { "op": "add", - "path": "/SYSLOG_SERVER/{}".format(SYSLOG_DUMMY_IPV6_SERVER), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, SYSLOG_DUMMY_IPV6_SERVER), "value": {} } ] @@ -191,17 +193,17 @@ def syslog_server_tc1_xfail(duthost): ("remove", "10.0.0.6", "cc98:2008:1"), ("remove", "10.0.0.5", "cc98:2008::2") ] - + 
json_namespace = '/localhost' if duthost.is_multi_asic else '' for op, dummy_syslog_server_hostname, dummy_syslog_server_v6 in xfail_input: json_patch = [ { "op": "{}".format(op), - "path": "/SYSLOG_SERVER/{}".format(dummy_syslog_server_hostname), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, dummy_syslog_server_hostname), "value": {} }, { "op": "{}".format(op), - "path": "/SYSLOG_SERVER/{}".format(dummy_syslog_server_v6), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, dummy_syslog_server_v6), "value": {} } ] @@ -225,23 +227,24 @@ def syslog_server_tc1_replace(duthost): [10.0.0.6] [cc98:2008::2] """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/SYSLOG_SERVER/{}".format(SYSLOG_DUMMY_IPV6_SERVER) + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, SYSLOG_DUMMY_IPV6_SERVER) }, { "op": "remove", - "path": "/SYSLOG_SERVER/{}".format(SYSLOG_DUMMY_IPV4_SERVER) + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, SYSLOG_DUMMY_IPV4_SERVER) }, { "op": "add", - "path": "/SYSLOG_SERVER/{}".format(REPLACE_SYSLOG_SERVER_v4), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, REPLACE_SYSLOG_SERVER_v4), "value": {} }, { "op": "add", - "path": "/SYSLOG_SERVER/{}".format(REPLACE_SYSLOG_SERVER_v6), + "path": "{}/SYSLOG_SERVER/{}".format(json_namespace, REPLACE_SYSLOG_SERVER_v6), "value": {} } ] @@ -270,10 +273,11 @@ def syslog_server_tc1_remove(duthost): Syslog Servers ---------------- """ + json_namespace = '/localhost' if duthost.is_multi_asic else '' json_patch = [ { "op": "remove", - "path": "/SYSLOG_SERVER" + "path": "{}/SYSLOG_SERVER".format(json_namespace) } ]