From c7bc25e3112bf1e7a07a43532029aedcc0a84f57 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Thu, 20 Feb 2020 12:31:32 +0200 Subject: [PATCH 001/111] [fwutil]: Command-line utility for interacting with platform components (#772) * [fwutil]: initial version. Signed-off-by: Nazarii Hnydyn * [fwutil]: Fix UI: enable progressbar render finalizer. Signed-off-by: Nazarii Hnydyn * [fwutil]: integrate utility with SONiC CLI. Signed-off-by: Nazarii Hnydyn * [fwutil]: update CLI command reference documentation. Signed-off-by: Nazarii Hnydyn * [fwutil]: Revisit CLI architecture: avoid direct imports. Signed-off-by: Nazarii Hnydyn * [fwutil]: Fix review comments: refactor CLI command reference. Signed-off-by: Nazarii Hnydyn * [fwutil]: Fix review comments: update CLI documentation. Signed-off-by: Nazarii Hnydyn --- config/main.py | 42 ++ data/etc/bash_completion.d/fwutil | 8 + doc/Command-Reference.md | 169 ++++++ fwutil/__init__.py | 5 + fwutil/lib.py | 827 ++++++++++++++++++++++++++++++ fwutil/log.py | 126 +++++ fwutil/main.py | 300 +++++++++++ setup.py | 2 + show/main.py | 7 + 9 files changed, 1486 insertions(+) create mode 100644 data/etc/bash_completion.d/fwutil create mode 100755 fwutil/__init__.py create mode 100755 fwutil/lib.py create mode 100755 fwutil/log.py create mode 100755 fwutil/main.py diff --git a/config/main.py b/config/main.py index 0d9d134b19..c54ce39940 100755 --- a/config/main.py +++ b/config/main.py @@ -2030,6 +2030,48 @@ def platform(): if asic_type == 'mellanox': platform.add_command(mlnx.mlnx) +# 'firmware' subgroup ("config platform firmware ...") +@platform.group() +def firmware(): + """Firmware configuration tasks""" + pass + +# 'install' subcommand ("config platform firmware install") +@firmware.command( + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True + ), + add_help_option=False +) +@click.argument('args', nargs=-1, type=click.UNPROCESSED) +def install(args): + """Install platform firmware""" + cmd = "fwutil 
install {}".format(" ".join(args)) + + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + sys.exit(e.returncode) + +# 'update' subcommand ("config platform firmware update") +@firmware.command( + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True + ), + add_help_option=False +) +@click.argument('args', nargs=-1, type=click.UNPROCESSED) +def update(args): + """Update platform firmware""" + cmd = "fwutil update {}".format(" ".join(args)) + + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + sys.exit(e.returncode) + # # 'watermark' group ("show watermark telemetry interval") # diff --git a/data/etc/bash_completion.d/fwutil b/data/etc/bash_completion.d/fwutil new file mode 100644 index 0000000000..60ec589a6a --- /dev/null +++ b/data/etc/bash_completion.d/fwutil @@ -0,0 +1,8 @@ +_fwutil_completion() { + COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ + COMP_CWORD=$COMP_CWORD \ + _FWUTIL_COMPLETE=complete $1 ) ) + return 0 +} + +complete -F _fwutil_completion -o default fwutil; diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 472eaa00d4..b91d4b9aec 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -73,6 +73,9 @@ * [NTP](#ntp) * [NTP show commands](#ntp-show-commands) * [NTP config commands](#ntp-config-commands) +* [Platform Component Firmware](#platform-component-firmware) + * [Platform Component Firmware show commands](#platform-component-firmware-show-commands) + * [Platform Component Firmware config commands](#platform-component-firmware-config-commands) * [Platform Specific Commands](#platform-specific-commands) * [PortChannels](#portchannels) * [PortChannel Show commands](#portchannel-show-commands) @@ -3788,6 +3791,172 @@ This command is used to delete a configured NTP server IP address. 
Go Back To [Beginning of the document](#) or [Beginning of this section](#NTP) +## Platform Component Firmware + +### Platform Component Firmware show commands + +**show platform firmware** + +This command displays platform components firmware status information. + +- Usage: +```bash +show platform firmware +``` + +- Example: +```bash +root@sonic:/home/admin# show platform firmware +Chassis Module Component Version Description +--------- -------- ----------- ----------------------- --------------------------------------- +Chassis1 N/A BIOS 0ACLH004_02.02.007_9600 BIOS - Basic Input/Output System + CPLD 5.3.3.1 CPLD - includes all CPLDs in the switch +``` + +### Platform Component Firmware config commands + +**config platform firmware install** + +This command is used to install a platform component firmware. +Both modular and non modular chassis platforms are supported. + +- Usage: +```bash +config platform firmware install chassis component fw [-y|--yes] +config platform firmware install module component fw [-y|--yes] +``` + +- Example: +```bash +root@sonic:/home/admin# config platform firmware install chassis component BIOS fw /etc/mlnx/fw/sn3800/chassis1/bios.bin +New firmware will be installed, continue? [y/N]: y +Installing firmware: + /etc/mlnx/fw/sn3800/chassis1/bios.bin + +root@sonic:/home/admin# config platform firmware install module Module1 component BIOS fw http://mellanox.com/fw/sn3800/module1/bios.bin +New firmware will be installed, continue? [y/N]: y +Downloading firmware: + [##################################################] 100% +Installing firmware: + /tmp/bios.bin +``` + +Supported options: +1. -y|--yes - automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively + +**config platform firmware update** + +This command is used for automatic FW update of all available platform components. +Both modular and non modular chassis platforms are supported. 
+ +Automatic FW update requires `platform_components.json` to be created and placed at: +sonic-buildimage/device///platform_components.json + +Example: +1. Non modular chassis platform +```json +{ + "chassis": { + "Chassis1": { + "component": { + "BIOS": { + "firmware": "/etc//fw//chassis1/bios.bin", + "version": "0ACLH003_02.02.010", + "info": "Cold reboot is required" + }, + "CPLD": { + "firmware": "/etc//fw//chassis1/cpld.bin", + "version": "10", + "info": "Power cycle is required" + }, + "FPGA": { + "firmware": "/etc//fw//chassis1/fpga.bin", + "version": "5", + "info": "Power cycle is required" + } + } + } + } +} +``` + +2. Modular chassis platform +```json +{ + "chassis": { + "Chassis1": { + "component": { + "BIOS": { + "firmware": "/etc//fw//chassis1/bios.bin", + "version": "0ACLH003_02.02.010", + "info": "Cold reboot is required" + }, + "CPLD": { + "firmware": "/etc//fw//chassis1/cpld.bin", + "version": "10", + "info": "Power cycle is required" + }, + "FPGA": { + "firmware": "/etc//fw//chassis1/fpga.bin", + "version": "5", + "info": "Power cycle is required" + } + } + } + }, + "module": { + "Module1": { + "component": { + "CPLD": { + "firmware": "/etc//fw//module1/cpld.bin", + "version": "10", + "info": "Power cycle is required" + }, + "FPGA": { + "firmware": "/etc//fw//module1/fpga.bin", + "version": "5", + "info": "Power cycle is required" + } + } + } + } +} +``` + +Note: FW update will be skipped if component definition is not provided (e.g., 'BIOS': { }) + +- Usage: +```bash +config platform firmware update [-y|--yes] [-f|--force] [-i|--image=current|next] +``` + +- Example: +```bash +root@sonic:/home/admin# config platform firmware update +Chassis Module Component Firmware Version Status Info +--------- -------- ----------- ------------------------------------- ------------------------------------------------- ------------------ ----------------------- +Chassis1 N/A BIOS /etc/mlnx/fw/sn3800/chassis1/bios.bin 0ACLH004_02.02.007_9600 / 
0ACLH004_02.02.007_9600 up-to-date Cold reboot is required + CPLD /etc/mlnx/fw/sn3800/chassis1/cpld.bin 5.3.3.1 / 5.3.3.2 update is required Power cycle is required +New firmware will be installed, continue? [y/N]: y + +Summary: + +Chassis Module Component Status +--------- -------- ----------- ---------- +Chassis1 N/A BIOS up-to-date + CPLD success +``` + +Supported options: +1. -y|--yes - automatic yes to prompts. Assume "yes" as answer to all prompts and run non-interactively +2. -f|--force - install FW regardless the current version +3. -i|--image - update FW using current/next SONiC image + +Note: the default option is --image=current + +Go Back To [Beginning of the document](#) or [Beginning of this section](#platform-component-firmware) + + ## Platform Specific Commands There are few commands that are platform specific. Mellanox has used this feature and implemented Mellanox specific commands as follows. diff --git a/fwutil/__init__.py b/fwutil/__init__.py new file mode 100755 index 0000000000..6ed6f1a885 --- /dev/null +++ b/fwutil/__init__.py @@ -0,0 +1,5 @@ +try: + from sonic_platform.platform import Platform + from . import main +except ImportError as e: + raise ImportError("Required module not found: {}".format(str(e))) diff --git a/fwutil/lib.py b/fwutil/lib.py new file mode 100755 index 0000000000..3f6ecba4f3 --- /dev/null +++ b/fwutil/lib.py @@ -0,0 +1,827 @@ +#!/usr/bin/env python +# +# lib.py +# +# Core library for command-line interface for interacting with platform components within SONiC +# + +try: + import click + import os + import json + import urllib + import subprocess + import sonic_device_util + from collections import OrderedDict + from urlparse import urlparse + from tabulate import tabulate + from log import LogHelper + from . 
import Platform +except ImportError as e: + raise ImportError("Required module not found: {}".format(str(e))) + +# ========================= Constants ========================================== + +TAB = " " +EMPTY = "" +NA = "N/A" +NEWLINE = "\n" + +# ========================= Variables ========================================== + +log_helper = LogHelper() + +# ========================= Helper classes ===================================== + +class URL(object): + """ + URL + """ + HTTP_PREFIX = [ "http://", "https://" ] + HTTP_CODE_BASE = 100 + HTTP_4XX_CLIENT_ERRORS = 4 + + PB_LABEL = " " + PB_INFO_SEPARATOR = " | " + PB_FULL_TERMINAL_WIDTH = 0 + + TMP_PATH = "/tmp" + + def __init__(self, url): + self.__url = url + self.__pb = None + self.__bytes_num = 0 + + def __str__(self): + return self.__url + + def __reporthook(self, count, block_size, total_size): + if self.__pb is None: + self.__pb = click.progressbar( + label=self.PB_LABEL, + length=total_size, + show_eta=True, + show_percent=True, + info_sep=self.PB_INFO_SEPARATOR, + width=self.PB_FULL_TERMINAL_WIDTH + ) + + self.__pb.update(count * block_size - self.__bytes_num) + self.__bytes_num = count * block_size + + def __pb_reset(self): + if self.__pb: + self.__pb.render_finish() + self.__pb = None + + self.__bytes_num = 0 + + def __validate(self): + # Check basic URL syntax + if not self.__url.startswith(tuple(self.HTTP_PREFIX)): + raise RuntimeError("URL is malformed: did not match expected prefix " + str(self.HTTP_PREFIX)) + + response_code = None + + # Check URL existence + try: + urlfile = urllib.urlopen(self.__url) + response_code = urlfile.getcode() + except IOError: + raise RuntimeError("Did not receive a response from remote machine") + + # Check for a 4xx response code which indicates a nonexistent URL + if response_code / self.HTTP_CODE_BASE == self.HTTP_4XX_CLIENT_ERRORS: + raise RuntimeError("Image file not found on remote machine") + + def get_url(self): + return self.__url + + def is_url(self): + if 
self.__url.startswith(tuple(self.HTTP_PREFIX)): + return True + + return False + + def retrieve(self): + filename, headers = None, None + + self.__validate() + + result = urlparse(self.__url) + basename = os.path.basename(result.path) + name, extension = os.path.splitext(basename) + + if not extension: + raise RuntimeError("Filename is malformed: did not find an extension") + + try: + filename, headers = urllib.urlretrieve( + self.__url, + "{}/{}".format(self.TMP_PATH, basename), + self.__reporthook + ) + finally: + self.__pb_reset() + + return filename, headers + + url = property(fget=get_url) + + +class PlatformDataProvider(object): + """ + PlatformDataProvider + """ + def __init__(self): + self.__platform = Platform() + self.__chassis = self.__platform.get_chassis() + + self.chassis_component_map = self.__get_chassis_component_map() + self.module_component_map = self.__get_module_component_map() + + def __get_chassis_component_map(self): + chassis_component_map = OrderedDict() + + chassis_name = self.__chassis.get_name() + chassis_component_map[chassis_name] = OrderedDict() + + component_list = self.chassis.get_all_components() + for component in component_list: + component_name = component.get_name() + chassis_component_map[chassis_name][component_name] = component + + return chassis_component_map + + def __get_module_component_map(self): + module_component_map = OrderedDict() + + module_list = self.__chassis.get_all_modules() + for module in module_list: + module_name = module.get_name() + module_component_map[module_name] = OrderedDict() + + component_list = module.get_all_components() + for component in component_list: + component_name = component.get_name() + module_component_map[module_name][component_name] = component + + return module_component_map + + def get_platform(self): + return self.__platform + + def get_chassis(self): + return self.__chassis + + def is_modular_chassis(self): + return len(self.module_component_map) > 0 + + def 
is_chassis_has_components(self): + return self.__chassis.get_num_components() > 0 + + platform = property(fget=get_platform) + chassis = property(fget=get_chassis) + + +class SquashFs(object): + """ + SquashFs + """ + OS_PREFIX = "SONiC-OS-" + + FS_PATH_TEMPLATE = "/host/image-{}/fs.squashfs" + FS_MOUNTPOINT_TEMPLATE = "/tmp/image-{}-fs" + + def __init__(self): + current_image = self.__get_current_image() + next_image = self.__get_next_image() + + if current_image == next_image: + raise RuntimeError("Next boot image is not set") + + image_stem = next_image.lstrip(self.OS_PREFIX) + + self.fs_path = self.FS_PATH_TEMPLATE.format(image_stem) + self.fs_mountpoint = self.FS_MOUNTPOINT_TEMPLATE.format(image_stem) + + def __get_current_image(self): + cmd = "sonic_installer list | grep 'Current: ' | cut -f2 -d' '" + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + + return output.rstrip(NEWLINE) + + def __get_next_image(self): + cmd = "sonic_installer list | grep 'Next: ' | cut -f2 -d' '" + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) + + return output.rstrip(NEWLINE) + + def mount_next_image_fs(self): + if os.path.ismount(self.fs_mountpoint): + self.umount_next_image_fs() + + os.mkdir(self.fs_mountpoint) + cmd = "mount -t squashfs {} {}".format(self.fs_path, self.fs_mountpoint) + subprocess.check_call(cmd, shell=True) + + return self.fs_mountpoint + + def umount_next_image_fs(self): + if os.path.ismount(self.fs_mountpoint): + cmd = "umount -rf {}".format(self.fs_mountpoint) + subprocess.check_call(cmd, shell=True) + + if os.path.exists(self.fs_mountpoint): + os.rmdir(self.fs_mountpoint) + + +class PlatformComponentsParser(object): + """ + PlatformComponentsParser + """ + PLATFORM_COMPONENTS_FILE = "platform_components.json" + PLATFORM_COMPONENTS_PATH_TEMPLATE = "{}/usr/share/sonic/device/{}/{}" + + CHASSIS_KEY = "chassis" + MODULE_KEY = "module" + COMPONENT_KEY = "component" + FIRMWARE_KEY = "firmware" + 
VERSION_KEY = "version" + INFO_KEY = "info" + + UTF8_ENCODING = "utf-8" + + def __init__(self, is_modular_chassis): + self.__is_modular_chassis = is_modular_chassis + self.__chassis_component_map = OrderedDict() + self.__module_component_map = OrderedDict() + + def __get_platform_type(self): + return sonic_device_util.get_platform_info( + sonic_device_util.get_machine_info() + ) + + def __get_platform_components_path(self, root_path): + return self.PLATFORM_COMPONENTS_PATH_TEMPLATE.format( + root_path, + self.__get_platform_type(), + self.PLATFORM_COMPONENTS_FILE + ) + + def __is_str(self, obj): + return isinstance(obj, unicode) or isinstance(obj, str) + + def __is_dict(self, obj): + return isinstance(obj, dict) + + def __parser_fail(self, msg): + raise RuntimeError("Failed to parse \"{}\": {}".format(self.PLATFORM_COMPONENTS_FILE, msg)) + + def __parser_platform_fail(self, msg): + self.__parser_fail("invalid platform schema: {}".format(msg)) + + def __parser_chassis_fail(self, msg): + self.__parser_fail("invalid chassis schema: {}".format(msg)) + + def __parser_module_fail(self, msg): + self.__parser_fail("invalid module schema: {}".format(msg)) + + def __parser_component_fail(self, msg): + self.__parser_fail("invalid component schema: {}".format(msg)) + + def __parse_component_section(self, section, component, is_module_component=False): + if not self.__is_dict(component): + self.__parser_component_fail("dictionary is expected: key={}".format(self.COMPONENT_KEY)) + + if not component: + return + + missing_key = None + + for key1, value1 in component.items(): + if not self.__is_dict(value1): + self.__parser_component_fail("dictionary is expected: key={}".format(key1)) + + if not is_module_component: + self.__chassis_component_map[section][key1] = OrderedDict() + else: + self.__module_component_map[section][key1] = OrderedDict() + + if value1: + if len(value1) != 3: + self.__parser_component_fail("unexpected number of records: key={}".format(key1)) + + if 
self.FIRMWARE_KEY not in value1: + missing_key = self.FIRMWARE_KEY + break + elif self.VERSION_KEY not in value1: + missing_key = self.VERSION_KEY + break + elif self.INFO_KEY not in value1: + missing_key = self.INFO_KEY + break + + for key2, value2 in value1.items(): + if not self.__is_str(value2): + self.__parser_component_fail("string is expected: key={}".format(key2)) + + if not is_module_component: + self.__chassis_component_map[section][key1] = value1 + else: + self.__module_component_map[section][key1] = value1 + + if missing_key is not None: + self.__parser_component_fail("\"{}\" key hasn't been found".format(missing_key)) + + def __parse_chassis_section(self, chassis): + self.__chassis_component_map = OrderedDict() + + if not self.__is_dict(chassis): + self.__parser_chassis_fail("dictionary is expected: key={}".format(self.CHASSIS_KEY)) + + if not chassis: + self.__parser_chassis_fail("dictionary is empty: key={}".format(self.CHASSIS_KEY)) + + if len(chassis) != 1: + self.__parser_chassis_fail("unexpected number of records: key={}".format(self.CHASSIS_KEY)) + + for key, value in chassis.items(): + if not self.__is_dict(value): + self.__parser_chassis_fail("dictionary is expected: key={}".format(key)) + + if not value: + self.__parser_chassis_fail("dictionary is empty: key={}".format(key)) + + if self.COMPONENT_KEY not in value: + self.__parser_chassis_fail("\"{}\" key hasn't been found".format(self.COMPONENT_KEY)) + + if len(value) != 1: + self.__parser_chassis_fail("unexpected number of records: key={}".format(key)) + + self.__chassis_component_map[key] = OrderedDict() + self.__parse_component_section(key, value[self.COMPONENT_KEY]) + + def __parse_module_section(self, module): + self.__module_component_map = OrderedDict() + + if not self.__is_dict(module): + self.__parser_module_fail("dictionary is expected: key={}".format(self.MODULE_KEY)) + + if not module: + self.__parser_module_fail("dictionary is empty: key={}".format(self.MODULE_KEY)) + + for key, 
value in module.items(): + if not self.__is_dict(value): + self.__parser_module_fail("dictionary is expected: key={}".format(key)) + + if not value: + self.__parser_module_fail("dictionary is empty: key={}".format(key)) + + if self.COMPONENT_KEY not in value: + self.__parser_module_fail("\"{}\" key hasn't been found".format(self.COMPONENT_KEY)) + + if len(value) != 1: + self.__parser_module_fail("unexpected number of records: key={}".format(key)) + + self.__module_component_map[key] = OrderedDict() + self.__parse_component_section(key, value[self.COMPONENT_KEY], True) + + def __deunicodify_hook(self, pairs): + new_pairs = [ ] + + for key, value in pairs: + if isinstance(key, unicode): + key = key.encode(self.UTF8_ENCODING) + + if isinstance(value, unicode): + value = value.encode(self.UTF8_ENCODING) + + new_pairs.append((key, value)) + + return OrderedDict(new_pairs) + + def get_chassis_component_map(self): + return self.__chassis_component_map + + def get_module_component_map(self): + return self.__module_component_map + + def parse_platform_components(self, root_path=None): + platform_components_path = None + + if root_path is None: + platform_components_path = self.__get_platform_components_path(EMPTY) + else: + platform_components_path = self.__get_platform_components_path(root_path) + + with open(platform_components_path) as platform_components: + data = json.load(platform_components, object_pairs_hook=self.__deunicodify_hook) + + if not self.__is_dict(data): + self.__parser_platform_fail("dictionary is expected: key=root") + + if not data: + self.__parser_platform_fail("dictionary is empty: key=root") + + if self.CHASSIS_KEY not in data: + self.__parser_platform_fail("\"{}\" key hasn't been found".format(self.CHASSIS_KEY)) + + if not self.__is_modular_chassis: + if len(data) != 1: + self.__parser_platform_fail("unexpected number of records: key=root") + + self.__parse_chassis_section(data[self.CHASSIS_KEY]) + + if self.__is_modular_chassis: + if 
self.MODULE_KEY not in data: + self.__parser_platform_fail("\"{}\" key hasn't been found".format(self.MODULE_KEY)) + + if len(data) != 2: + self.__parser_platform_fail("unexpected number of records: key=root") + + self.__parse_module_section(data[self.MODULE_KEY]) + + chassis_component_map = property(fget=get_chassis_component_map) + module_component_map = property(fget=get_module_component_map) + + +class ComponentUpdateProvider(PlatformDataProvider): + """ + ComponentUpdateProvider + """ + STATUS_HEADER = [ "Chassis", "Module", "Component", "Firmware", "Version", "Status", "Info" ] + RESULT_HEADER = [ "Chassis", "Module", "Component", "Status" ] + FORMAT = "simple" + + FW_STATUS_UPDATE_SUCCESS = "success" + FW_STATUS_UPDATE_FAILURE = "failure" + FW_STATUS_UPDATE_REQUIRED = "update is required" + FW_STATUS_UP_TO_DATE = "up-to-date" + + SECTION_CHASSIS = "Chassis" + SECTION_MODULE = "Module" + + def __init__(self, root_path=None): + PlatformDataProvider.__init__(self) + + self.__root_path = root_path + + self.__pcp = PlatformComponentsParser(self.is_modular_chassis()) + self.__pcp.parse_platform_components(root_path) + + self.__validate_platform_schema(self.__pcp) + + def __diff_keys(self, keys1, keys2): + return set(keys1) ^ set(keys2) + + def __validate_component_map(self, section, pdp_map, pcp_map): + diff_keys = self.__diff_keys(pdp_map.keys(), pcp_map.keys()) + + if diff_keys: + raise RuntimeError( + "{} names mismatch: keys={}".format( + section, + str(list(diff_keys)) + ) + ) + + for key in pdp_map.keys(): + diff_keys = self.__diff_keys(pdp_map[key].keys(), pcp_map[key].keys()) + + if diff_keys: + raise RuntimeError( + "{} component names mismatch: keys={}".format( + section, + str(list(diff_keys)) + ) + ) + + def __validate_platform_schema(self, pcp): + self.__validate_component_map( + self.SECTION_CHASSIS, + self.chassis_component_map, + pcp.chassis_component_map + ) + + self.__validate_component_map( + self.SECTION_MODULE, + self.module_component_map, + 
pcp.module_component_map + ) + + def get_status(self, force): + status_table = [ ] + + append_chassis_name = self.is_chassis_has_components() + append_module_na = not self.is_modular_chassis() + + for chassis_name, chassis_component_map in self.chassis_component_map.items(): + for chassis_component_name, chassis_component in chassis_component_map.items(): + component = self.__pcp.chassis_component_map[chassis_name][chassis_component_name] + + firmware_path = NA + firmware_version_current = chassis_component.get_firmware_version() + firmware_version_available = NA + firmware_version = firmware_version_current + + status = self.FW_STATUS_UP_TO_DATE + info = NA + + if append_chassis_name: + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_na: + module_name = NA + append_module_na = False + else: + module_name = EMPTY + + if component: + firmware_path = component[self.__pcp.FIRMWARE_KEY] + firmware_version_available = component[self.__pcp.VERSION_KEY] + firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) + info = component[self.__pcp.INFO_KEY] + + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path + + if force or firmware_version_current != firmware_version_available: + status = self.FW_STATUS_UPDATE_REQUIRED + + status_table.append( + [ + chassis_name, + module_name, + chassis_component_name, + firmware_path, + firmware_version, + status, + info + ] + ) + + append_chassis_name = not self.is_chassis_has_components() + + if self.is_modular_chassis(): + for module_name, module_component_map in self.module_component_map.items(): + append_module_name = True + for module_component_name, module_component in module_component_map.items(): + component = self.__pcp.module_component_map[module_name][module_component_name] + + firmware_path = NA + firmware_version_current = module_component.get_firmware_version() + firmware_version_available = NA + firmware_version = 
firmware_version_current + + status = self.FW_STATUS_UP_TO_DATE + info = NA + + if append_chassis_name: + chassis_name = self.chassis.get_name() + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_name: + append_module_name = False + else: + module_name = EMPTY + + if component: + firmware_path = component[self.__pcp.FIRMWARE_KEY] + firmware_version_available = component[self.__pcp.VERSION_KEY] + firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) + info = component[self.__pcp.INFO_KEY] + + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path + + if force or firmware_version_current != firmware_version_available: + status = self.FW_STATUS_UPDATE_REQUIRED + + status_table.append( + [ + chassis_name, + module_name, + module_component_name, + firmware_path, + firmware_version, + status, + info + ] + ) + + return tabulate(status_table, self.STATUS_HEADER, tablefmt=self.FORMAT) + + def update_firmware(self, force): + status_table = [ ] + + append_chassis_name = self.is_chassis_has_components() + append_module_na = not self.is_modular_chassis() + + for chassis_name, chassis_component_map in self.chassis_component_map.items(): + for chassis_component_name, chassis_component in chassis_component_map.items(): + component = self.__pcp.chassis_component_map[chassis_name][chassis_component_name] + component_path = "{}/{}".format( + chassis_name, + chassis_component_name + ) + + firmware_version_current = chassis_component.get_firmware_version() + firmware_version_available = NA + + status = self.FW_STATUS_UP_TO_DATE + + if append_chassis_name: + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_na: + module_name = NA + append_module_na = False + else: + module_name = EMPTY + + if component: + firmware_path = component[self.__pcp.FIRMWARE_KEY] + firmware_version_available = component[self.__pcp.VERSION_KEY] + + if self.__root_path is not None: + 
firmware_path = self.__root_path + firmware_path + + if force or firmware_version_current != firmware_version_available: + result = False + + try: + click.echo("Installing firmware:") + click.echo(TAB + firmware_path) + + log_helper.log_fw_install_start(component_path, firmware_path) + + if not os.path.exists(firmware_path): + raise RuntimeError("Path \"{}\" does not exist".format(firmware_path)) + + result = chassis_component.install_firmware(firmware_path) + log_helper.log_fw_install_end(component_path, firmware_path, result) + except Exception as e: + log_helper.log_fw_install_end(component_path, firmware_path, False, e) + log_helper.print_error(str(e)) + + status = self.FW_STATUS_UPDATE_SUCCESS if result else self.FW_STATUS_UPDATE_FAILURE + + status_table.append( + [ + chassis_name, + module_name, + chassis_component_name, + status, + ] + ) + + append_chassis_name = not self.is_chassis_has_components() + + if self.is_modular_chassis(): + for module_name, module_component_map in self.module_component_map.items(): + append_module_name = True + + for module_component_name, module_component in module_component_map.items(): + component = self.__pcp.module_component_map[module_name][module_component_name] + component_path = "{}/{}/{}".format( + self.chassis.get_name(), + module_name, + module_component_name + ) + + firmware_version_current = module_component.get_firmware_version() + firmware_version_available = NA + + status = self.FW_STATUS_UP_TO_DATE + + if append_chassis_name: + chassis_name = self.chassis.get_name() + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_name: + append_module_name = False + else: + module_name = EMPTY + + if component: + firmware_path = component[self.__pcp.FIRMWARE_KEY] + firmware_version_available = component[self.__pcp.VERSION_KEY] + + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path + + if force or firmware_version_current != firmware_version_available: + result = 
False + + try: + click.echo("Installing firmware:") + click.echo(TAB + firmware_path) + + log_helper.log_fw_install_start(component_path, firmware_path) + + if not os.path.exists(firmware_path): + raise RuntimeError("Path \"{}\" does not exist".format(firmware_path)) + + result = module_component.install_firmware(firmware_path) + log_helper.log_fw_install_end(component_path, firmware_path, result) + except Exception as e: + log_helper.log_fw_install_end(component_path, firmware_path, False, e) + log_helper.print_error(str(e)) + + status = self.FW_STATUS_UPDATE_SUCCESS if result else self.FW_STATUS_UPDATE_FAILURE + + status_table.append( + [ + chassis_name, + module_name, + module_component_name, + status, + ] + ) + + return tabulate(status_table, self.RESULT_HEADER, tablefmt=self.FORMAT) + + +class ComponentStatusProvider(PlatformDataProvider): + """ + ComponentStatusProvider + """ + HEADER = [ "Chassis", "Module", "Component", "Version", "Description" ] + FORMAT = "simple" + + def __init__(self): + PlatformDataProvider.__init__(self) + + def get_status(self): + status_table = [ ] + + append_chassis_name = self.is_chassis_has_components() + append_module_na = not self.is_modular_chassis() + + for chassis_name, chassis_component_map in self.chassis_component_map.items(): + for chassis_component_name, chassis_component in chassis_component_map.items(): + firmware_version = chassis_component.get_firmware_version() + description = chassis_component.get_description() + + if append_chassis_name: + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_na: + module_name = NA + append_module_na = False + else: + module_name = EMPTY + + status_table.append( + [ + chassis_name, + module_name, + chassis_component_name, + firmware_version, + description + ] + ) + + append_chassis_name = not self.is_chassis_has_components() + + if self.is_modular_chassis(): + for module_name, module_component_map in self.module_component_map.items(): + 
append_module_name = True + + for module_component_name, module_component in module_component_map.items(): + firmware_version = module_component.get_firmware_version() + description = module_component.get_description() + + if append_chassis_name: + chassis_name = self.chassis.get_name() + append_chassis_name = False + else: + chassis_name = EMPTY + + if append_module_name: + append_module_name = False + else: + module_name = EMPTY + + status_table.append( + [ + chassis_name, + module_name, + module_component_name, + firmware_version, + description + ] + ) + + return tabulate(status_table, self.HEADER, tablefmt=self.FORMAT) diff --git a/fwutil/log.py b/fwutil/log.py new file mode 100755 index 0000000000..0580e4bb27 --- /dev/null +++ b/fwutil/log.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python +# +# log.py +# +# Logging library for command-line interface for interacting with platform components within SONiC +# + +try: + import click + import syslog +except ImportError as e: + raise ImportError("Required module not found: {}".format(str(e))) + +# ========================= Constants ========================================== + +SYSLOG_IDENTIFIER = "fwutil" + +# ========================= Helper classes ===================================== + +class SyslogLogger(object): + """ + SyslogLogger + """ + def __init__(self, identifier): + self.__syslog = syslog + + self.__syslog.openlog( + ident=identifier, + logoption=self.__syslog.LOG_NDELAY, + facility=self.__syslog.LOG_USER + ) + + def __del__(self): + self.__syslog.closelog() + + def log_error(self, msg): + self.__syslog.syslog(self.__syslog.LOG_ERR, msg) + + def log_warning(self, msg): + self.__syslog.syslog(self.__syslog.LOG_WARNING, msg) + + def log_notice(self, msg): + self.__syslog.syslog(self.__syslog.LOG_NOTICE, msg) + + def log_info(self, msg): + self.__syslog.syslog(self.__syslog.LOG_INFO, msg) + + def log_debug(self, msg): + self.__syslog.syslog(self.__syslog.LOG_DEBUG, msg) + + +logger = 
SyslogLogger(SYSLOG_IDENTIFIER) + + +class LogHelper(object): + """ + LogHelper + """ + FW_ACTION_DOWNLOAD = "download" + FW_ACTION_INSTALL = "install" + + STATUS_SUCCESS = "success" + STATUS_FAILURE = "failure" + + def __log_fw_action_start(self, action, component, firmware): + caption = "Firmware {} started".format(action) + template = "{}: component={}, firmware={}" + + logger.log_info( + template.format( + caption, + component, + firmware + ) + ) + + def __log_fw_action_end(self, action, component, firmware, status, exception=None): + caption = "Firmware {} ended".format(action) + + status_template = "{}: component={}, firmware={}, status={}" + exception_template = "{}: component={}, firmware={}, status={}, exception={}" + + if status: + logger.log_info( + status_template.format( + caption, + component, + firmware, + self.STATUS_SUCCESS + ) + ) + else: + if exception is None: + logger.log_error( + status_template.format( + caption, + component, + firmware, + self.STATUS_FAILURE + ) + ) + else: + logger.log_error( + exception_template.format( + caption, + component, + firmware, + self.STATUS_FAILURE, + str(exception) + ) + ) + + def log_fw_download_start(self, component, firmware): + self.__log_fw_action_start(self.FW_ACTION_DOWNLOAD, component, firmware) + + def log_fw_download_end(self, component, firmware, status, exception=None): + self.__log_fw_action_end(self.FW_ACTION_DOWNLOAD, component, firmware, status, exception) + + def log_fw_install_start(self, component, firmware): + self.__log_fw_action_start(self.FW_ACTION_INSTALL, component, firmware) + + def log_fw_install_end(self, component, firmware, status, exception=None): + self.__log_fw_action_end(self.FW_ACTION_INSTALL, component, firmware, status, exception) + + def print_error(self, msg): + click.echo("Error: {}.".format(msg)) diff --git a/fwutil/main.py b/fwutil/main.py new file mode 100755 index 0000000000..c1443627c1 --- /dev/null +++ b/fwutil/main.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# +# 
main.py +# +# Command-line utility for interacting with platform components within SONiC +# + +try: + import click + import os + from lib import PlatformDataProvider, ComponentStatusProvider, ComponentUpdateProvider + from lib import URL, SquashFs + from log import LogHelper +except ImportError as e: + raise ImportError("Required module not found: {}".format(str(e))) + +# ========================= Constants ========================================== + +VERSION = '1.0.0.0' + +CHASSIS_NAME_CTX_KEY = "chassis_name" +MODULE_NAME_CTX_KEY = "module_name" +COMPONENT_CTX_KEY = "component" +COMPONENT_PATH_CTX_KEY = "component_path" +URL_CTX_KEY = "url" + +TAB = " " +PATH_SEPARATOR = "/" +IMAGE_NEXT = "next" +HELP = "?" + +EXIT_SUCCESS = 0 +EXIT_FAILURE = 1 + +ROOT_UID = 0 + +# ========================= Variables ========================================== + +pdp = PlatformDataProvider() +log_helper = LogHelper() + +# ========================= Helper functions =================================== + +def cli_show_help(ctx): + click.echo(ctx.get_help()) + ctx.exit(EXIT_SUCCESS) + + +def cli_abort(ctx, msg): + click.echo("Error: " + msg + ". 
Aborting...") + ctx.abort() + + +def cli_init(ctx): + if os.geteuid() != ROOT_UID: + cli_abort(ctx, "Root privileges are required") + + ctx.ensure_object(dict) + +# ========================= CLI commands and groups ============================ + +# 'fwutil' command main entrypoint +@click.group() +@click.pass_context +def cli(ctx): + """fwutil - Command-line utility for interacting with platform components""" + + cli_init(ctx) + + +# 'install' group +@cli.group() +@click.pass_context +def install(ctx): + """Install platform firmware""" + ctx.obj[COMPONENT_PATH_CTX_KEY] = [ ] + + +# 'chassis' subgroup +@click.group() +@click.pass_context +def chassis(ctx): + """Install chassis firmware""" + ctx.obj[CHASSIS_NAME_CTX_KEY] = pdp.chassis.get_name() + ctx.obj[COMPONENT_PATH_CTX_KEY].append(pdp.chassis.get_name()) + + +def validate_module(ctx, param, value): + if value == HELP: + cli_show_help(ctx) + + if not pdp.is_modular_chassis(): + ctx.fail("Unsupported platform: non modular chassis.") + + if value not in pdp.module_component_map: + ctx.fail("Invalid value for \"{}\": Module \"{}\" does not exist.".format(param.metavar, value)) + + return value + + +# 'module' subgroup +@click.group() +@click.argument('module_name', metavar='', callback=validate_module) +@click.pass_context +def module(ctx, module_name): + """Install module firmware""" + ctx.obj[MODULE_NAME_CTX_KEY] = module_name + ctx.obj[COMPONENT_PATH_CTX_KEY].append(pdp.chassis.get_name()) + ctx.obj[COMPONENT_PATH_CTX_KEY].append(module_name) + + +def validate_component(ctx, param, value): + if value == HELP: + cli_show_help(ctx) + + if CHASSIS_NAME_CTX_KEY in ctx.obj: + chassis_name = ctx.obj[CHASSIS_NAME_CTX_KEY] + if value in pdp.chassis_component_map[chassis_name]: + ctx.obj[COMPONENT_CTX_KEY] = pdp.chassis_component_map[chassis_name][value] + return value + + if MODULE_NAME_CTX_KEY in ctx.obj: + module_name = ctx.obj[MODULE_NAME_CTX_KEY] + if value in pdp.module_component_map[module_name]: + 
ctx.obj[COMPONENT_CTX_KEY] = pdp.module_component_map[module_name][value] + return value + + ctx.fail("Invalid value for \"{}\": Component \"{}\" does not exist.".format(param.metavar, value)) + + +# 'component' subgroup +@click.group() +@click.argument('component_name', metavar='', callback=validate_component) +@click.pass_context +def component(ctx, component_name): + """Install component firmware""" + ctx.obj[COMPONENT_PATH_CTX_KEY].append(component_name) + + +def install_fw(ctx, fw_path): + component = ctx.obj[COMPONENT_CTX_KEY] + component_path = PATH_SEPARATOR.join(ctx.obj[COMPONENT_PATH_CTX_KEY]) + + status = False + + try: + click.echo("Installing firmware:") + click.echo(TAB + fw_path) + log_helper.log_fw_install_start(component_path, fw_path) + status = component.install_firmware(fw_path) + log_helper.log_fw_install_end(component_path, fw_path, status) + except Exception as e: + log_helper.log_fw_install_end(component_path, fw_path, False, e) + cli_abort(ctx, str(e)) + + if not status: + log_helper.print_error("Firmware install failed") + ctx.exit(EXIT_FAILURE) + + +def download_fw(ctx, url): + filename, headers = None, None + + component_path = PATH_SEPARATOR.join(ctx.obj[COMPONENT_PATH_CTX_KEY]) + + try: + click.echo("Downloading firmware:") + log_helper.log_fw_download_start(component_path, str(url)) + filename, headers = url.retrieve() + log_helper.log_fw_download_end(component_path, str(url), True) + except Exception as e: + log_helper.log_fw_download_end(component_path, str(url), False, e) + cli_abort(ctx, str(e)) + + return filename + + +def validate_fw(ctx, param, value): + if value == HELP: + cli_show_help(ctx) + + url = URL(value) + + if not url.is_url(): + path = click.Path(exists=True) + path.convert(value, param, ctx) + else: + ctx.obj[URL_CTX_KEY] = url + + return value + + +# 'fw' subcommand +@component.command() +@click.option('-y', '--yes', 'yes', is_flag=True, show_default=True, help="Assume \"yes\" as answer to all prompts and run 
non-interactively") +@click.argument('fw_path', metavar='', callback=validate_fw) +@click.pass_context +def fw(ctx, yes, fw_path): + """Install firmware from local binary or URL""" + if not yes: + click.confirm("New firmware will be installed, continue?", abort=True) + + url = None + + if URL_CTX_KEY in ctx.obj: + url = ctx.obj[URL_CTX_KEY] + fw_path = download_fw(ctx, url) + + try: + install_fw(ctx, fw_path) + finally: + if url is not None and os.path.exists(fw_path): + os.remove(fw_path) + + +# 'update' subgroup +@cli.command() +@click.option('-y', '--yes', 'yes', is_flag=True, show_default=True, help="Assume \"yes\" as answer to all prompts and run non-interactively") +@click.option('-f', '--force', 'force', is_flag=True, show_default=True, help="Install firmware regardless the current version") +@click.option('-i', '--image', 'image', type=click.Choice(["current", "next"]), default="current", show_default=True, help="Update firmware using current/next image") +@click.pass_context +def update(ctx, yes, force, image): + """Update platform firmware""" + aborted = False + + try: + squashfs = None + + try: + cup = None + + if image == IMAGE_NEXT: + squashfs = SquashFs() + fs_path = squashfs.mount_next_image_fs() + cup = ComponentUpdateProvider(fs_path) + else: + cup = ComponentUpdateProvider() + + click.echo(cup.get_status(force)) + + if not yes: + click.confirm("New firmware will be installed, continue?", abort=True) + + result = cup.update_firmware(force) + + click.echo() + click.echo("Summary:") + click.echo() + + click.echo(result) + except click.Abort: + aborted = True + except Exception as e: + aborted = True + click.echo("Error: " + str(e) + ". 
Aborting...") + + if image == IMAGE_NEXT and squashfs is not None: + squashfs.umount_next_image_fs() + except Exception as e: + cli_abort(ctx, str(e)) + + if aborted: + ctx.abort() + + +# 'show' subgroup +@cli.group() +def show(): + """Display platform info""" + pass + + +# 'status' subcommand +@show.command() +@click.pass_context +def status(ctx): + """Show platform components status""" + try: + csp = ComponentStatusProvider() + click.echo(csp.get_status()) + except Exception as e: + cli_abort(ctx, str(e)) + + +# 'version' subcommand +@show.command() +def version(): + """Show utility version""" + click.echo("fwutil version {0}".format(VERSION)) + +install.add_command(chassis) +install.add_command(module) + +chassis.add_command(component) +module.add_command(component) + +# ========================= CLI entrypoint ===================================== + +if __name__ == '__main__': + cli() diff --git a/setup.py b/setup.py index 766707e451..47bd6e2fb0 100644 --- a/setup.py +++ b/setup.py @@ -41,6 +41,7 @@ 'ssdutil', 'pfc', 'psuutil', + 'fwutil', 'pddf_fanutil', 'pddf_psuutil', 'pddf_thermalutil', @@ -120,6 +121,7 @@ 'ssdutil = ssdutil.main:ssdutil', 'pfc = pfc.main:cli', 'psuutil = psuutil.main:cli', + 'fwutil = fwutil.main:cli', 'pddf_fanutil = pddf_fanutil.main:cli', 'pddf_psuutil = pddf_psuutil.main:cli', 'pddf_thermalutil = pddf_thermalutil.main:cli', diff --git a/show/main.py b/show/main.py index e15dd2465c..4a52306044 100755 --- a/show/main.py +++ b/show/main.py @@ -1684,6 +1684,13 @@ def temperature(): cmd = 'tempershow' run_command(cmd) +# 'firmware' subcommand ("show platform firmware") +@platform.command() +def firmware(): + """Show firmware status information""" + cmd = "fwutil show status" + run_command(cmd) + # # 'logging' command ("show logging") # From 18d2c659f23e3a142152eef24c78132ac3a86677 Mon Sep 17 00:00:00 2001 From: Oleksandr Kuzovych <59696363+okuzovych@users.noreply.github.com> Date: Fri, 21 Feb 2020 01:54:36 +0200 Subject: [PATCH 002/111] 
[config] Add 'config interface mtu' command (#793) --- config/main.py | 21 +++++++++++++++++++++ doc/Command-Reference.md | 16 ++++++++++++++++ scripts/portconfig | 14 ++++++++++++-- 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index c54ce39940..8fa54ea9e6 100755 --- a/config/main.py +++ b/config/main.py @@ -1424,6 +1424,27 @@ def mgmt_ip_restart_services(): cmd="systemctl restart ntp-config" os.system (cmd) +# +# 'mtu' subcommand +# + +@interface.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('interface_mtu', metavar='', required=True) +@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") +def mtu(ctx, interface_name, interface_mtu, verbose): + """Set interface mtu""" + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + command = "portconfig -p {} -m {}".format(interface_name, interface_mtu) + if verbose: + command += " -vv" + run_command(command, display_cmd=verbose) + # # 'ip' subgroup ('config interface ip ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index b91d4b9aec..cbac2b2ba8 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2563,6 +2563,22 @@ Dynamic breakout feature is yet to be supported in SONiC and hence uses cannot c admin@sonic:~$ sudo config interface Ethernet63 speed 40000 ``` +**config interface mtu (Versions >= 201904)** + +This command is used to configure the mtu for the Physical interface. Use the value 1500 for setting max transfer unit size to 1500 bytes. 
+ +- Usage: + + *Versions >= 201904* + ``` + config interface mtu + ``` + +- Example (Versions >= 201904): + ``` + admin@sonic:~$ sudo config interface mtu Ethernet64 1500 + ``` + Go Back To [Beginning of the document](#) or [Beginning of this section](#interfaces) diff --git a/scripts/portconfig b/scripts/portconfig index 3d88464c81..07b4828a29 100755 --- a/scripts/portconfig +++ b/scripts/portconfig @@ -2,7 +2,7 @@ """ portconfig is the utility to show and change ECN configuration -usage: portconfig [-h] [-v] [-s] [-f] [-p PROFILE] [-gmin GREEN_MIN] +usage: portconfig [-h] [-v] [-s] [-f] [-m] [-p PROFILE] [-gmin GREEN_MIN] [-gmax GREEN_MAX] [-ymin YELLOW_MIN] [-ymax YELLOW_MAX] [-rmin RED_MIN] [-rmax RED_MAX] [-vv] @@ -13,6 +13,7 @@ optional arguments: -p --port port name -s --speed port speed in Mbits -f --fec port fec mode + -m --mtu port mtu in bytes """ from __future__ import print_function @@ -25,6 +26,7 @@ import swsssdk PORT_TABLE_NAME = "PORT" PORT_SPEED_CONFIG_FIELD_NAME = "speed" PORT_FEC_CONFIG_FIELD_NAME = "fec" +PORT_MTU_CONFIG_FIELD_NAME = "mtu" class portconfig(object): """ @@ -57,6 +59,11 @@ class portconfig(object): print("Setting fec %s on port %s" % (fec, port)) self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_FEC_CONFIG_FIELD_NAME: fec}) + def set_mtu(self, port, mtu): + if self.verbose: + print("Setting mtu %s on port %s" % (mtu, port)) + self.db.mod_entry(PORT_TABLE_NAME, port, {PORT_MTU_CONFIG_FIELD_NAME: mtu}) + def main(): parser = argparse.ArgumentParser(description='Set SONiC port parameters', version='1.0.0', @@ -65,6 +72,7 @@ def main(): parser.add_argument('-l', '--list', action='store_true', help='list port parametars', default=False) parser.add_argument('-s', '--speed', type=int, help='port speed value in Mbit', default=None) parser.add_argument('-f', '--fec', type=str, help='port fec mode value in (none, rs, fc)', default=None) + parser.add_argument('-m', '--mtu', type=int, help='port mtu value in bytes', default=None) 
parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False) args = parser.parse_args() @@ -72,11 +80,13 @@ def main(): port = portconfig(args.verbose, args.port) if args.list: port.list_params(args.port) - elif args.speed or args.fec: + elif args.speed or args.fec or args.mtu: if args.speed: port.set_speed(args.port, args.speed) if args.fec: port.set_fec(args.port, args.fec) + if args.mtu: + port.set_mtu(args.port, args.mtu) else: parser.print_help() sys.exit(1) From 70210f124e87fb3e66d980f94533bc73b5308efb Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Thu, 20 Feb 2020 22:40:41 -0800 Subject: [PATCH 003/111] Added check if service existis before start/stop/restart of service. (#815) This is possible if some of docker are not built then corresponding service file will not be present --- config/main.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/config/main.py b/config/main.py index 8fa54ea9e6..ba99ec8d95 100755 --- a/config/main.py +++ b/config/main.py @@ -23,6 +23,7 @@ CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help', '-?']) +SONIC_GENERATED_SERVICE_PATH = '/etc/sonic/generated_services.conf' SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' SYSLOG_IDENTIFIER = "config" VLAN_SUB_INTERFACE_SEPARATOR = '.' @@ -394,6 +395,15 @@ def _get_platform(): return tokens[1].strip() return '' +def _get_sonic_generated_services(): + if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): + return None + generated_services_list = [] + with open(SONIC_GENERATED_SERVICE_PATH) as generated_service_file: + for line in generated_service_file: + generated_services_list.append(line.rstrip('\n')) + return None if not generated_services_list else generated_services_list + # Callback for confirmation prompt. 
Aborts if user enters "n" def _abort_if_false(ctx, param, value): if not value: @@ -409,10 +419,18 @@ def _stop_services(): 'hostcfgd', 'nat' ] + generated_services_list = _get_sonic_generated_services() + + if generated_services_list is None: + log_error("Failed to get generated services") + return + if asic_type == 'mellanox' and 'pmon' in services_to_stop: services_to_stop.remove('pmon') for service in services_to_stop: + if service + '.service' not in generated_services_list: + continue try: click.echo("Stopping service {} ...".format(service)) run_command("systemctl stop {}".format(service)) @@ -440,7 +458,15 @@ def _reset_failed_services(): 'nat' ] + generated_services_list = _get_sonic_generated_services() + + if generated_services_list is None: + log_error("Failed to get generated services") + return + for service in services_to_reset: + if service + '.service' not in generated_services_list: + continue try: click.echo("Resetting failed status for service {} ...".format(service)) run_command("systemctl reset-failed {}".format(service)) @@ -463,10 +489,18 @@ def _restart_services(): 'nat', 'sflow', ] + generated_services_list = _get_sonic_generated_services() + + if generated_services_list is None: + log_error("Failed to get generated services") + return + if asic_type == 'mellanox' and 'pmon' in services_to_restart: services_to_restart.remove('pmon') for service in services_to_restart: + if service + '.service' not in generated_services_list: + continue try: click.echo("Restarting service {} ...".format(service)) run_command("systemctl restart {}".format(service)) From 0dd758c70487b38c1ff977cb9f4422cdd473c6f2 Mon Sep 17 00:00:00 2001 From: Akhilesh Samineni <47657796+AkhileshSamineni@users.noreply.github.com> Date: Fri, 21 Feb 2020 12:12:17 +0530 Subject: [PATCH 004/111] Fix for dhcp_relay deletion on a VLAN (#813) Signed-off-by: Akhilesh Samineni --- config/main.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/config/main.py 
b/config/main.py index ba99ec8d95..a269420f10 100755 --- a/config/main.py +++ b/config/main.py @@ -1193,7 +1193,8 @@ def add_vlan_dhcp_relay_destination(ctx, vid, dhcp_relay_destination_ip): return else: dhcp_relay_dests.append(dhcp_relay_destination_ip) - db.set_entry('VLAN', vlan_name, {"dhcp_servers":dhcp_relay_dests}) + vlan['dhcp_servers'] = dhcp_relay_dests + db.set_entry('VLAN', vlan_name, vlan) click.echo("Added DHCP relay destination address {} to {}".format(dhcp_relay_destination_ip, vlan_name)) try: click.echo("Restarting DHCP relay service...") @@ -1218,7 +1219,11 @@ def del_vlan_dhcp_relay_destination(ctx, vid, dhcp_relay_destination_ip): dhcp_relay_dests = vlan.get('dhcp_servers', []) if dhcp_relay_destination_ip in dhcp_relay_dests: dhcp_relay_dests.remove(dhcp_relay_destination_ip) - db.set_entry('VLAN', vlan_name, {"dhcp_servers":dhcp_relay_dests}) + if len(dhcp_relay_dests) == 0: + del vlan['dhcp_servers'] + else: + vlan['dhcp_servers'] = dhcp_relay_dests + db.set_entry('VLAN', vlan_name, vlan) click.echo("Removed DHCP relay destination address {} from {}".format(dhcp_relay_destination_ip, vlan_name)) try: click.echo("Restarting DHCP relay service...") From 474c6a15269e5e9460d99351ba81881ad1ffde2c Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Fri, 21 Feb 2020 07:57:08 -0800 Subject: [PATCH 005/111] [Command-Reference.md] Unify Usage statments and Examples (including sample prompts) (#816) --- doc/Command-Reference.md | 257 ++++++++++++++++++--------------------- 1 file changed, 118 insertions(+), 139 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index cbac2b2ba8..8fb8b23eb0 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -338,7 +338,7 @@ The same syntax applies to all subgroups of `show` which themselves contain subc - Example: ``` - user@debug:~$ show interfaces -? + admin@sonic:~$ show interfaces -? 
Show details of the network interfaces @@ -645,7 +645,7 @@ Couple of example outputs are given below. ``` ``` - admin@arc-switch1025:~$ show platform syseeprom + admin@sonic:~$ show platform syseeprom TlvInfo Header: Id String: TlvInfo Version: 1 @@ -683,7 +683,7 @@ This command displays health parameters of the device's SSD - Example: ``` - root@arc-switch1029:/home/admin# show platform ssdhealth + admin@sonic:~$ show platform ssdhealth Device Model : M.2 (S42) 3IE3 Health : 99.665% Temperature : 30C @@ -832,9 +832,7 @@ When this is disabled and if the authentication request fails on first server, a - Example: ``` - admin@sonic:~$ sudo -i - root@sonic:~# config aaa authentication failthrough enable - root@sonic:~# + admin@sonic:~$ sudo config aaa authentication failthrough enable ``` **aaa authentication fallback** @@ -848,8 +846,7 @@ When the tacacs+ authentication fails, it falls back to local authentication by - Example: ``` - root@sonic:~# config aaa authentication fallback enable - root@sonic:~# + admin@sonic:~$ sudo config aaa authentication fallback enable ``` **aaa authentication login** @@ -874,8 +871,7 @@ If the authentication fails, AAA will check the "failthrough" configuration and - Example: ``` - root@sonic:~# config aaa authentication login tacacs+ - root@sonic:~# + admin@sonic:~$ sudo config aaa authentication login tacacs+ ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#aaa--tacacs) @@ -948,8 +944,7 @@ When this command is executed, the configured tacacs+ server addresses are updat - Example: ``` - root@T1-2:~# config tacacs add 10.11.12.13 -t 10 -k testing789 -a mschap -o 50 -p 9 - root@T1-2:~# + admin@sonic:~$ sudo config tacacs add 10.11.12.13 -t 10 -k testing789 -a mschap -o 50 -p 9 ``` - Example Server Configuration in /etc/pam.d/common-auth-sonic configuration file: @@ -976,8 +971,7 @@ This command is used to delete the tacacs+ servers configured. 
- Example: ``` - root@T1-2:~# config tacacs delete 10.11.12.13 - root@T1-2:~# + admin@sonic:~$ sudo config tacacs delete 10.11.12.13 ``` **config tacacs authtype** @@ -992,8 +986,7 @@ When user has not configured server specific authtype, this global value shall b - Example: ``` - root@T1-2:~# config tacacs authtype mschap - root@T1-2:~# + admin@sonic:~$ sudo config tacacs authtype mschap ``` **config tacacs default** @@ -1008,8 +1001,7 @@ Default for authtype is "pap", default for passkey is EMPTY_STRING and default f - Example (This will reset the global authtype back to the default value "pap"): ``` - root@T1-2:~# config tacacs default authtype - root@T1-2:~# + admin@sonic:~$ sudo config tacacs default authtype ``` **config tacacs passkey** @@ -1024,8 +1016,7 @@ When user has not configured server specific passkey, this global value shall be - Example: ``` - root@T1-2:~# config tacacs passkey testing123 - root@T1-2:~# + admin@sonic:~$ sudo config tacacs passkey testing123 ``` **config tacacs timeout** @@ -1046,8 +1037,7 @@ When user has not configured server specific timeout, this global value shall be - Example: To configure non-default timeout value ``` - root@T1-2:~# config tacacs timeout 60 - root@T1-2:~# + admin@sonic:~$ sudo config tacacs timeout 60 ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#aaa--tacacs) @@ -1173,9 +1163,9 @@ When the optional argument "max_priority" is specified, each rule’s priority - Examples: ``` - admin@sonic:~$ config acl update full /etc/sonic/acl_full_snmp_1_2_ssh_4.json - admin@sonic:~$ config acl update full "--table_name SNMP-ACL /etc/sonic/acl_full_snmp_1_2_ssh_4.json" - admin@sonic:~$ config acl update full "--session_name everflow0 /etc/sonic/acl_full_snmp_1_2_ssh_4.json" + admin@sonic:~$ sudo config acl update full /etc/sonic/acl_full_snmp_1_2_ssh_4.json + admin@sonic:~$ sudo config acl update full "--table_name SNMP-ACL /etc/sonic/acl_full_snmp_1_2_ssh_4.json" + admin@sonic:~$ sudo config 
acl update full "--session_name everflow0 /etc/sonic/acl_full_snmp_1_2_ssh_4.json" ``` This command will remove all rules from all the ACL tables and insert all the rules present in this input file. @@ -1219,10 +1209,10 @@ When the optional argument "max_priority" is specified, each rule’s priority - Examples: ``` - admin@sonic:~$ config acl update incremental /etc/sonic/acl_incremental_snmp_1_3_ssh_4.json + admin@sonic:~$ sudo config acl update incremental /etc/sonic/acl_incremental_snmp_1_3_ssh_4.json ``` ``` - admin@sonic:~$ config acl update incremental "--session_name everflow0 /etc/sonic/acl_incremental_snmp_1_3_ssh_4.json" + admin@sonic:~$ sudo config acl update incremental "--session_name everflow0 /etc/sonic/acl_incremental_snmp_1_3_ssh_4.json" ``` Refer the example file [acl_incremental_snmp_1_3_ssh_4.json](#) that adds two rules for SNMP (Rule1 and Rule3) and one rule for SSH (Rule4) @@ -1371,7 +1361,7 @@ This command displays the summary of all IPv4 & IPv6 bgp neighbors that are conf - Example: ``` - admin@sonic-z9264f-9251:~# show ip bgp summary + admin@sonic:~$ show ip bgp summary IPv4 Unicast Summary: BGP router identifier 10.1.0.32, local AS number 65100 vrf-id 0 @@ -1391,7 +1381,7 @@ This command displays the summary of all IPv4 & IPv6 bgp neighbors that are conf - Example: ``` - admin@sonic-z9264f-9251:~# show bgp summary + admin@sonic:~$ show bgp summary IPv4 Unicast Summary: BGP router identifier 10.1.0.32, local AS number 65100 vrf-id 0 @@ -1603,7 +1593,7 @@ This command displays the routing policy that takes precedence over the other ro - Example: ``` - admin@T1-2:~$ show route-map + admin@sonic:~$ show route-map ZEBRA: route-map RM_SET_SRC, permit, sequence 10 Match clauses: @@ -1656,7 +1646,7 @@ When the session is shutdown using this command, BGP state in "show ip bgp summa - Usage: ``` - sudo config bgp shutdown all + config bgp shutdown all ``` - Example: @@ -1688,7 +1678,7 @@ This command is used to start up all the IPv4 & IPv6 BGP 
neighbors - Usage: ``` - sudo config bgp startup all + config bgp startup all ``` - Example: @@ -1703,7 +1693,7 @@ This command is used to start up the particular IPv4 or IPv6 BGP neighbor using - Usage: ``` - sudo config bgp startup neighbor ( | ) + config bgp startup neighbor ( | ) ``` - Examples: @@ -1721,7 +1711,7 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio - Usage: ``` - sudo config bgp remove neighbor + config bgp remove neighbor ``` - Examples: @@ -1786,7 +1776,7 @@ This command will configure the status of auto-restart feature for a specific co - Usage: ``` - sudo config container feature autorestart (enabled | disabled) + config container feature autorestart (enabled | disabled) ``` - Example: @@ -1816,8 +1806,6 @@ This command is used to add a DHCP Relay Destination IP address to the a VLAN. admin@sonic:~$ sudo config vlan dhcp_relay add 1000 7.7.7.7 Added DHCP relay destination address 7.7.7.7 to Vlan1000 Restarting DHCP relay service... - Running command: systemctl restart dhcp_relay - admin@str-s6000-acs-11:~$ ``` **config vlan dhcp_relay delete** @@ -1953,7 +1941,7 @@ This command will fail if the given name is already in use, if the type of count - Usage: ``` - admin@sonic:~$ sudo config dropcounters install [-d ] [-g ] [-a ] + config dropcounters install [-d ] [-g ] [-a ] ``` - Example: @@ -1969,7 +1957,7 @@ This command will fail if any of the specified drop reasons are not supported. - Usage: ``` - admin@sonic:~$ sudo config dropcounters add_reasons + config dropcounters add_reasons ``` - Example: @@ -1983,7 +1971,7 @@ This command is used to remove drop reasons from an already initialized counter. - Usage: ``` - admin@sonic:~$ sudo config dropcounters remove_reasons + config dropcounters remove_reasons ``` - Example: @@ -1997,7 +1985,7 @@ This command is used to delete a drop counter. 
- Usage: ``` - admin@sonic:~$ sudo config dropcounters delete + config dropcounters delete ``` - Example: @@ -2013,7 +2001,7 @@ This comnmand is used to clear drop counters. This is done on a per-user basis. - Usage: ``` - admin@sonic:~$ sonic-clear dropcounters + sonic-clear dropcounters ``` - Example: @@ -2043,7 +2031,7 @@ This command displays all the WRED profiles that are configured in the device. - Example: ``` - show ecn + admin@sonic:~$ show ecn Profile: **AZURE_LOSSLESS** ----------------------- ------- red_max_threshold 2097152 @@ -2092,7 +2080,7 @@ The list of the WRED profile fields that are configurable is listed in the below - Example (Configures the "red max threshold" for the WRED profile name "wredprofileabcd". It will create the WRED profile if it does not exist.): ``` - root@T1-2:~# config ecn -profile wredprofileabcd -rmax 100 + admin@sonic:~$ sudo config ecn -profile wredprofileabcd -rmax 100 ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#ecn) @@ -2112,8 +2100,7 @@ This command is used to change device hostname without traffic being impacted. - Example: ``` - admin@lnos-x1-a-csw06:~$ sudo config hostname CSW06 - Running command: service hostname-config restart + admin@sonic:~$ sudo config hostname CSW06 Please note loaded setting will be lost after system reboot. To preserve setting, run `config save`. ``` @@ -2126,7 +2113,7 @@ Subsequent pages explain each of these commands in detail. - Example: ``` - user@debug:~$ show interfaces -? + admin@sonic:~$ show interfaces -? 
Show details of the network interfaces @@ -2226,13 +2213,13 @@ Optionally, you can specify a period (in seconds) with which to gather counters - NOTE: Interface counters can be cleared by the user with the following command: ``` - root@sonic:~# sonic-clear counters + admin@sonic:~$ sonic-clear counters ``` - NOTE: Layer 3 interface counters can be cleared by the user with the following command: ``` - root@sonic:~# sonic-clear rifcounters + admin@sonic:~$ sonic-clear rifcounters ``` **show interfaces description** @@ -2281,7 +2268,7 @@ This command is used to display the list of expected neighbors for all interface - Example: ``` - root@sonic-z9264f-9251:~# show interfaces neighbor expected + admin@sonic:~$ show interfaces neighbor expected LocalPort Neighbor NeighborPort NeighborLoopback NeighborMgmt NeighborType ----------- ---------- -------------- ------------------ -------------- -------------- Ethernet112 ARISTA01T1 Ethernet1 None 10.16.205.100 ToRRouter @@ -2321,10 +2308,8 @@ This command displays some more fields such as Lanes, Speed, MTU, Type, Asymmetr show interfaces status [] ``` -- Example: +- Example (show interface status of all interfaces): ``` - show interface status of all interfaces - admin@sonic:~$ show interfaces status Interface Lanes Speed MTU Alias Oper Admin Type Asym PFC ----------- --------------- ------- ----- --------------- ------ ------- ------ ---------- @@ -2336,8 +2321,6 @@ This command displays some more fields such as Lanes, Speed, MTU, Type, Asymmetr - Example (to only display the status for interface Ethernet0): ``` - show interface status for one particular interface - admin@sonic:~$ show interface status Ethernet0 Interface Lanes Speed MTU Alias Oper Admin ----------- -------- ------- ----- -------------- ------ ------- @@ -2752,7 +2735,7 @@ Refer the routing stack [Quagga Command Reference](https://www.quagga.net/docs/q - Example: ``` - show ip protocol + admin@sonic:~$ show ip protocol Protocol : route-map 
------------------------ system : none @@ -3010,10 +2993,9 @@ If the argument is not specified, it prompts the user to confirm whether user re - Example: ``` - root@T1-2:~# config load + admin@sonic:~$ sudo config load Load config from the file /etc/sonic/config_db.json? [y/N]: y Running command: /usr/local/bin/sonic-cfggen -j /etc/sonic/config_db.json --write-to-db - root@T1-2:~# ``` ### Loading configuration from minigraph (XML) file @@ -3037,10 +3019,9 @@ If the argument is not specified, it prompts the user to confirm whether user re - Example: ``` - root@T1-2:~# config load_minigraph + admin@sonic:~$ sudo config load_minigraph Reload config from minigraph? [y/N]: y Running command: /usr/local/bin/sonic-cfggen -j /etc/sonic/config_db.json --write-to-db - root@T1-2:~# ``` ### Reloading Configuration @@ -3073,7 +3054,7 @@ If the argument is not specified, it prompts the user to confirm whether user re - Example: ``` - root@T1-2:~# config reload + admin@sonic:~$ sudo config reload Clear current config and reload config from the file /etc/sonic/config_db.json? [y/N]: y Running command: systemctl stop dhcp_relay Running command: systemctl stop swss @@ -3089,7 +3070,6 @@ If the argument is not specified, it prompts the user to confirm whether user re Running command: systemctl restart hostname-config Running command: systemctl restart interfaces-config Timeout, server 10.11.162.42 not responding. - root@T1-2:~# ``` @@ -3111,10 +3091,9 @@ If the argument is not specified, it prompts the user to confirm whether user re - Example: ``` - root@T1-2:~# config load_mgmt_config + admin@sonic:~$ sudo config load_mgmt_config Reload config from minigraph? [y/N]: y Running command: /usr/local/bin/sonic-cfggen -M /etc/sonic/device_desc.xml --write-to-db - root@T1-2:~# ``` @@ -3132,12 +3111,12 @@ Saved file can be transferred to remote machines for debugging. 
If users wants t - Example (Save configuration to /etc/sonic/config_db.json): ``` - root@T1-2:~# config save -y + admin@sonic:~$ sudo config save -y ``` - Example (Save configuration to a specified file): ``` - root@T1-2:~# config save -y /etc/sonic/config2.json + admin@sonic:~$ sudo config save -y /etc/sonic/config2.json ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#loading-reloading-and-saving-configuration) @@ -3158,7 +3137,7 @@ This command displays whether the management VRF is enabled or disabled. It also - Example: ``` - root@sonic:/etc/init.d# show mgmt-vrf + admin@sonic:~$ show mgmt-vrf ManagementVRF : Enabled @@ -3184,7 +3163,7 @@ This command displays the routes that are present in the routing table 5000 that - Example: ``` - root@sonic:/etc/init.d# show mgmt-vrf routes + admin@sonic:~$ show mgmt-vrf routes Routes in Management VRF Routing Table: default via 10.16.210.254 dev eth0 metric 201 @@ -3209,7 +3188,7 @@ This command displays the IP address(es) configured for the management interface - Example: ``` - root@sonic:/etc/init.d# show management_interface address + admin@sonic:~$ show management_interface address Management IP address = 10.16.210.75/24 Management NetWork Default Gateway = 10.16.210.254 Management IP address = FC00:2::32/64 @@ -3227,7 +3206,7 @@ This command displays the configured SNMP agent IP addresses. - Example: ``` - root@sonic-s6100-07:~# show snmpagentaddress + admin@sonic:~$ show snmpagentaddress ListenIP ListenPort ListenVrf ---------- ------------ ----------- 1.2.3.4 787 mgmt @@ -3244,7 +3223,7 @@ This command displays the configured SNMP Trap server IP addresses. - Example: ``` - root@sonic-s6100-07:~# show snmptrap + admin@sonic:~$ show snmptrap Version TrapReceiverIP Port VRF Community --------- ---------------- ------ ----- ----------- 2 31.31.31.31 456 mgmt public @@ -3263,7 +3242,7 @@ This command enables the management VRF in the system. 
This command restarts the - Example: ``` - root@sonic-s6100-07:~# config vrf add mgmt + admin@sonic:~$ sudo config vrf add mgmt ``` **config vrf del mgmt** @@ -3277,7 +3256,7 @@ This command disables the management VRF in the system. This command restarts th - Example: ``` - root@sonic-s6100-07:~# config vrf del mgmt + admin@sonic:~$ sudo config vrf del mgmt ``` **config snmpagentaddress add** @@ -3291,9 +3270,9 @@ This command adds the SNMP agent IP address on which the SNMP agent is expected - Example: ``` - root@sonic-s6100-07:~#config snmpagentaddress add -v mgmt -p 123 21.22.13.14 + admin@sonic:~$ sudo config snmpagentaddress add -v mgmt -p 123 21.22.13.14 - For this example, configuration goes into /etc/snmp/snmpd.conf inside snmp docker as follows. When "-v" parameter is not used, the additional "%" in the following line will not be present. + Note: For this example, configuration goes into /etc/snmp/snmpd.conf inside snmp docker as follows. When "-v" parameter is not used, the additional "%" in the following line will not be present. agentAddress 21.22.13.14:123%mgmt ``` @@ -3309,7 +3288,7 @@ This command deletes the SNMP agent IP address on which the SNMP agent is expect - Example: ``` - root@sonic-s6100-07:~#config snmpagentaddress del -v mgmt -p 123 21.22.13.14 + admin@sonic:~$ sudo config snmpagentaddress del -v mgmt -p 123 21.22.13.14 ``` @@ -3324,7 +3303,7 @@ This command modifies the SNMP trap server IP address to which the SNMP agent is - Example: ``` - root@sonic-s6100-07:~#config snmptrap modify 2 -p 456 -v mgmt 21.21.21.21 + admin@sonic:~$ sudo config snmptrap modify 2 -p 456 -v mgmt 21.21.21.21 For this example, configuration goes into /etc/snmp/snmpd.conf inside snmp docker as follows. When "-v" parameter is not used, the additional "%" in the following line will not be present. In case of SNMPv1, "trapsink" will be updated, in case of v2, "trap2sink" will be updated and in case of v3, "informsink" will be updated. 
@@ -3343,7 +3322,7 @@ This command deletes the SNMP Trap server IP address to which SNMP agent is expe - Example: ``` - root@sonic-s6100-07:~#config snmptrap del -v mgmt -p 123 21.22.13.14 + admin@sonic:~$ sudo config snmptrap del -v mgmt -p 123 21.22.13.14 ``` @@ -3392,12 +3371,11 @@ While adding a new session, users need to configure the following fields that ar - Example: ``` - root@T1-2:~# config mirror_session add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 - root@T1-2:~# show mirror_session + admin@sonic:~$ sudo config mirror_session add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 + admin@sonic:~$ show mirror_session Name Status SRC IP DST IP GRE DSCP TTL Queue --------- -------- ----------- ----------- ------ ------ ----- ------- mrr_abcd inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 - root@T1-2:~# ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#mirroring) @@ -3419,7 +3397,7 @@ With no optional arguments, the whole NAT configuration is displayed. - Example: ``` - root@sonic:/# show nat config static + admin@sonic:~$ show nat config static Nat Type IP Protocol Global IP Global L4 Port Local IP Local L4 Port Twice-Nat Id -------- ----------- ------------ -------------- ------------- ------------- ------------ @@ -3428,7 +3406,7 @@ With no optional arguments, the whole NAT configuration is displayed. dnat tcp 65.55.45.7 2000 20.0.0.1 4500 1 snat tcp 20.0.0.2 4000 65.55.45.8 1030 1 - root@sonic:/# show nat config pool + admin@sonic:~$ show nat config pool Pool Name Global IP Range Global L4 Port Range ------------ ------------------------- -------------------- @@ -3436,7 +3414,7 @@ With no optional arguments, the whole NAT configuration is displayed. 
Pool2 65.55.45.6-65.55.45.8 --- Pool3 65.55.45.10-65.55.45.15 500-1000 - root@sonic:/# show nat config bindings + admin@sonic:~$ show nat config bindings Binding Name Pool Name Access-List Nat Type Twice-Nat Id ------------ ------------ ------------ -------- ------------ @@ -3444,14 +3422,14 @@ With no optional arguments, the whole NAT configuration is displayed. Bind2 Pool2 1 snat 1 Bind3 Pool3 2 snat -- - root@sonic:/# show nat config globalvalues + admin@sonic:~$ show nat config globalvalues Admin Mode : enabled Global Timeout : 600 secs TCP Timeout : 86400 secs UDP Timeout : 300 secs - root@sonic:/# show nat config zones + admin@sonic:~$ show nat config zones Port Zone ---- ---- @@ -3470,7 +3448,7 @@ This command displays the NAT translation statistics for each entry. - Example: ``` - root@sonic:/# show nat statistics + admin@sonic:~$ show nat statistics Protocol Source Destination Packets Bytes -------- --------- -------------- ------------- ------------- @@ -3494,7 +3472,7 @@ This command displays the NAT translation entries. Giving the optional count argument displays only the details about the number of translation entries. - Example: ``` - root@sonic:/# show nat translations + admin@sonic:~$ show nat translations Static NAT Entries ................. 4 Static NAPT Entries ................. 2 @@ -3525,7 +3503,7 @@ Giving the optional count argument displays only the details about the number of tcp 20.0.0.1:5500 65.55.42.1:2000 65.55.42.1:1026 20.0.0.1:4500 tcp 20.0.0.1:4500 65.55.42.1:1026 65.55.42.1:2000 20.0.0.1:5500 - root@sonic:/# show nat translations count + admin@sonic:~$ show nat translations count Static NAT Entries ................. 4 Static NAPT Entries ................. 
2 @@ -3571,10 +3549,10 @@ config nat remove static {{basic (global-ip) (local-ip)} | {{tcp | udp} (global- ``` - Example: ``` - root@sonic:/# config nat add static basic 65.55.45.1 12.12.12.14 -nat_type dnat - root@sonic:/# config nat add static tcp 65.55.45.2 100 12.12.12.15 200 -nat_type dnat + admin@sonic:~$ sudo config nat add static basic 65.55.45.1 12.12.12.14 -nat_type dnat + admin@sonic:~$ sudo config nat add static tcp 65.55.45.2 100 12.12.12.15 200 -nat_type dnat - root@sonic:/# show nat translations + admin@sonic:~$ show nat translations Static NAT Entries ................. 2 Static NAPT Entries ................. 2 @@ -3615,10 +3593,10 @@ config nat remove {pool (pool-name) | pools} ``` - Example: ``` - root@sonic:/# config nat add pool pool1 65.55.45.2-65.55.45.10 - root@sonic:/# config nat add pool pool2 65.55.45.3 100-1024 + admin@sonic:~$ sudo config nat add pool pool1 65.55.45.2-65.55.45.10 + admin@sonic:~$ sudo config nat add pool pool2 65.55.45.3 100-1024 - root@sonic:/# show nat config pool + admin@sonic:~$ show nat config pool Pool Name Global IP Range Global Port Range ----------- ---------------------- ------------------- @@ -3644,10 +3622,10 @@ config nat remove {binding (binding-name) | bindings} ``` - Example: ``` - root@sonic:/# config nat add binding bind1 pool1 acl1 - root@sonic:/# config nat add binding bind2 pool2 + admin@sonic:~$ sudo config nat add binding bind1 pool1 acl1 + admin@sonic:~$ sudo config nat add binding bind2 pool2 - root@sonic:/# show nat config bindings + admin@sonic:~$ show nat config bindings Binding Name Pool Name Access-List Nat Type Twice-NAT Id -------------- ----------- ------------- ---------- -------------- @@ -3669,9 +3647,9 @@ config nat remove {interface (interface-name) | interfaces} ``` - Example: ``` - root@sonic:/# config nat add interface Ethernet28 -nat_zone 1 + admin@sonic:~$ sudo config nat add interface Ethernet28 -nat_zone 1 - root@sonic:/# show nat config zones + admin@sonic:~$ show nat config 
zones Port Zone ---------- ------ @@ -3698,9 +3676,9 @@ config nat reset {tcp-timeout | timeout | udp-timeout} ``` - Example: ``` - root@sonic:/# config nat add set tcp-timeout 3600 + admin@sonic:~$ sudo config nat add set tcp-timeout 3600 - root@sonic:/# show nat config globalvalues + admin@sonic:~$ show nat config globalvalues Admin Mode : enabled Global Timeout : 600 secs @@ -3719,8 +3697,8 @@ This command is used to enable or disable the NAT feature. - Example: ``` - root@sonic:/# config nat feature enable - root@sonic:/# config nat feature disable + admin@sonic:~$ sudo config nat feature enable + admin@sonic:~$ sudo config nat feature disable ``` ### NAT Clear commands @@ -3822,7 +3800,7 @@ show platform firmware - Example: ```bash -root@sonic:/home/admin# show platform firmware +admin@sonic:~$ show platform firmware Chassis Module Component Version Description --------- -------- ----------- ----------------------- --------------------------------------- Chassis1 N/A BIOS 0ACLH004_02.02.007_9600 BIOS - Basic Input/Output System @@ -3844,12 +3822,12 @@ config platform firmware install module component - Example: ```bash -root@sonic:/home/admin# config platform firmware install chassis component BIOS fw /etc/mlnx/fw/sn3800/chassis1/bios.bin +admin@sonic:~$ sudo config platform firmware install chassis component BIOS fw /etc/mlnx/fw/sn3800/chassis1/bios.bin New firmware will be installed, continue? [y/N]: y Installing firmware: /etc/mlnx/fw/sn3800/chassis1/bios.bin -root@sonic:/home/admin# config platform firmware install module Module1 component BIOS fw http://mellanox.com/fw/sn3800/module1/bios.bin +admin@sonic:~$ sudo config platform firmware install module Module1 component BIOS fw http://mellanox.com/fw/sn3800/module1/bios.bin New firmware will be installed, continue? 
[y/N]: y Downloading firmware: [##################################################] 100% @@ -3948,7 +3926,7 @@ config platform firmware update [-y|--yes] [-f|--force] [-i|--image=current|next - Example: ```bash -root@sonic:/home/admin# config platform firmware update +admin@sonic:~$ sudo config platform firmware update Chassis Module Component Firmware Version Status Info --------- -------- ----------- ------------------------------------- ------------------------------------------------- ------------------ ----------------------- Chassis1 N/A BIOS /etc/mlnx/fw/sn3800/chassis1/bios.bin 0ACLH004_02.02.007_9600 / 0ACLH004_02.02.007_9600 up-to-date Cold reboot is required @@ -3988,7 +3966,7 @@ This command shows the SDK sniffer status - Example: ``` - admin@arc-switch1004:~$ show platform mlnx sniffer + admin@sonic:~$ show platform mlnx sniffer sdk sniffer is disabled ``` @@ -4004,7 +3982,7 @@ This means if ISSU is enabled on this SKU or not. A warm boot command can be exe - Example: ``` - admin@arc-switch1004:~$ show platform mlnx issu + admin@sonic:~$ show platform mlnx issu ISSU is enabled ``` @@ -4012,7 +3990,7 @@ In the case ISSU is disabled and warm-boot is called, the user will get a notifi - Example: ``` - admin@arc-switch1038:~$ sudo warm-reboot + admin@sonic:~$ sudo warm-reboot ISSU is not enabled on this HWSKU Warm reboot is not supported ``` @@ -4039,7 +4017,7 @@ In order to avoid that confirmation the -y / --yes option should be used. - Example: ``` - admin@arc-switch1038:~$ config platform mlnx sniffer sdk + admin@sonic:~$ config platform mlnx sniffer sdk To change SDK sniffer status, swss service will be restarted, continue? [y/N]: y NOTE: In order to avoid that confirmation the -y / --yes option should be used. 
``` @@ -4155,7 +4133,7 @@ This command displays the details of Rx & Tx priority-flow-control (pfc) for all - NOTE: PFC counters can be cleared by the user with the following command: ``` - root@sonic:~# sonic-clear pfccounters + admin@sonic:~$ sonic-clear pfccounters ``` #### Queue And Priority-Group @@ -4238,7 +4216,7 @@ Optionally, you can specify an interface name in order to display only that part - NOTE: Queue counters can be cleared by the user with the following command: ``` - root@sonic:~# sonic-clear queuecounters + admin@sonic:~$ sonic-clear queuecounters ``` **show queue watermark** @@ -4252,7 +4230,7 @@ This command displays the user watermark for the queues (Egress shared pool occu - Example: ``` - admin@sonic:~$ show queue watermark unicast + admin@sonic:~$ show queue watermark unicast Egress shared pool occupancy per unicast queue: Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 ----------- ----- ----- ----- ----- ----- ----- ----- ----- @@ -4261,7 +4239,7 @@ This command displays the user watermark for the queues (Egress shared pool occu Ethernet8 0 0 0 0 0 0 0 0 Ethernet12 0 0 0 0 0 0 0 0 - admin@sonic:~$ show queue watermark multicast (Egress shared pool occupancy per multicast queue) + admin@sonic:~$ show queue watermark multicast (Egress shared pool occupancy per multicast queue) ``` **show priority-group** @@ -4275,7 +4253,7 @@ This command displays the user watermark or persistent-watermark for the Ingress - Example: ``` - admin@sonic:~$ show priority-group watermark shared + admin@sonic:~$ show priority-group watermark shared Ingress shared pool occupancy per PG: Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 ----------- ----- ----- ----- ----- ----- ----- ----- ----- @@ -4332,13 +4310,13 @@ This command displays the user persistet-watermark for the queues (Egress shared - NOTE: Both "user watermark" and "persistent watermark" can be cleared by user: ``` - root@sonic:~# sonic-clear queue persistent-watermark unicast + admin@sonic:~$ sonic-clear queue 
persistent-watermark unicast - root@sonic:~# sonic-clear queue persistent-watermark multicast + admin@sonic:~$ sonic-clear queue persistent-watermark multicast - root@sonic:~# sonic-clear priority-group persistent-watermark shared + admin@sonic:~$ sonic-clear priority-group persistent-watermark shared - root@sonic:~# sonic-clear priority-group persistent-watermark headroom + admin@sonic:~$ sonic-clear priority-group persistent-watermark headroom ``` @@ -4410,12 +4388,12 @@ Some of the example QOS configurations that users can modify are given below. - Example: ``` - root@T1-2:~# config qos reload + admin@sonic:~$ sudo config qos reload Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/device/x86_64-dell_z9100_c2538-r0/Force10-Z9100-C32/buffers.json.j2 >/tmp/buffers.json Running command: /usr/local/bin/sonic-cfggen -d -t /usr/share/sonic/device/x86_64-dell_z9100_c2538-r0/Force10-Z9100-C32/qos.json.j2 -y /etc/sonic/sonic_version.yml >/tmp/qos.json Running command: /usr/local/bin/sonic-cfggen -j /tmp/buffers.json --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/qos.json --write-to-db - root@T1-2:~# + In this example, it uses the buffers.json.j2 file and qos.json.j2 file from platform specific folders. When there are no changes in the platform specific configutation files, they internally use the file "/usr/share/sonic/templates/buffers_config.j2" and "/usr/share/sonic/templates/qos_config.j2" to generate the configuration. ``` @@ -4531,7 +4509,7 @@ This command displays the running configuration of the ntp module. - Example: ``` - admin@str-s6000-acs-11:~$ show runningconfiguration ntp + admin@sonic:~$ show runningconfiguration ntp NTP Servers ------------- 1.1.1.1 @@ -4549,7 +4527,7 @@ This command displays the running configuration of the syslog module. 
- Example: ``` - admin@str-s6000-acs-11:~$ show runningconfiguration syslog + admin@sonic:~$ show runningconfiguration syslog Syslog Servers ---------------- 4.4.4.4 @@ -4672,7 +4650,7 @@ This command displays the current CPU usage by process. This command uses linux' - Example: ``` - admin@SONiC:~$ show processes cpu + admin@sonic:~$ show processes cpu top - 23:50:08 up 1:18, 1 user, load average: 0.25, 0.29, 0.25 Tasks: 161 total, 1 running, 160 sleeping, 0 stopped, 0 zombie %Cpu(s): 3.8 us, 1.0 sy, 0.0 ni, 95.1 id, 0.1 wa, 0.0 hi, 0.0 si, 0.0 st @@ -4705,7 +4683,7 @@ This command displays the current memory usage by processes. This command uses l - Example: ``` - admin@SONiC:~$ show processes memory + admin@sonic:~$ show processes memory top - 23:41:24 up 7 days, 39 min, 2 users, load average: 1.21, 1.19, 1.18 Tasks: 191 total, 2 running, 189 sleeping, 0 stopped, 0 zombie %Cpu(s): 2.8 us, 20.7 sy, 0.0 ni, 76.3 id, 0.0 wa, 0.0 hi, 0.2 si, 0.0 st @@ -4742,7 +4720,7 @@ This command displays the current summary information about all the processes - Example: ``` - admin@SONiC:~$ show processes summary + admin@sonic:~$ show processes summary PID PPID CMD %MEM %CPU 1 0 /sbin/init 0.0 0.0 2 0 [kthreadd] 0.0 0.0 @@ -4769,7 +4747,7 @@ This command displays the state of all the SONiC processes running inside a dock - Example: ``` - admin@lnos-x1-a-asw02:~$ show services + admin@sonic:~$ show services dhcp_relay docker --------------------------- UID PID PPID C STIME TTY TIME CMD @@ -4829,7 +4807,7 @@ This command displays the system-wide memory utilization information – just a - Example: ``` - admin@lnos-x1-a-asw02:~$ show system-memory + admin@sonic:~$ show system-memory Command: free -m -h total used free shared buffers cached Mem: 3.9G 2.0G 1.8G 33M 324M 791M @@ -4848,7 +4826,7 @@ This command displays virtual address to the physical address translation status - Example: ``` - admin@T1-2:~$ show mmu + admin@sonic:~$ show mmu Pool: ingress_lossless_pool ---- -------- 
xoff 4194112 @@ -4937,7 +4915,7 @@ NOTE: This command is not working. It crashes as follows. A bug ticket is opened - Example: ``` - admin@T1-2:~$ show line + admin@sonic:~$ show line ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#System-State) @@ -5149,17 +5127,17 @@ warm-reboot command initiates a warm reboot of the device. warm-reboot command doesn't require setting warm restart configuration. The command will setup everything needed to perform warm reboot. -This command requires root privilege. Example: +This command requires root privilege. - Usage: ``` - sudo warm-reboot + warm-reboot [-h|-?|-v|-f|-r|-k|-x|-c |-s] ``` - Parameters: ``` -h,-? : get this help - -v : turn on verbose + -v : turn on verbose mode -f : force execution -r : reboot with /sbin/reboot -k : reboot with /sbin/kexec -e [default] @@ -5257,6 +5235,7 @@ This command displays the warm_restart state. - Example: ``` + admin@sonic:~$ show warm_restart state name restore_count state ---------- --------------- ---------- orchagent 0 @@ -5553,7 +5532,7 @@ This command is used to change the image that can be loaded in the *next* reboot - Example: ``` - admin@sonic:~$ sonic_installer set_next_boot SONiC-OS-HEAD.XXXX + admin@sonic:~$ sudo sonic_installer set_next_boot SONiC-OS-HEAD.XXXX ``` **sonic_installer remove** @@ -5633,12 +5612,12 @@ Once if users go to "vtysh", they can use the routing stack specific commands as - Example (Quagga Routing Stack): ``` - admin@T1-2:~$ vtysh + admin@sonic:~$ vtysh Hello, this is Quagga (version 0.99.24.1). Copyright 1996-2005 Kunihiro Ishiguro, et al. - T1-2# show route-map (This command displays the route-map that is configured for the routing protocol.) + sonic# show route-map (This command displays the route-map that is configured for the routing protocol.) 
ZEBRA: route-map RM_SET_SRC, permit, sequence 10 Match clauses: @@ -5811,7 +5790,7 @@ This command displays the routing policy that takes precedence over the other ro - Example: ``` - admin@T1-2:~$ show route-map + admin@sonic:~$ show route-map ZEBRA: route-map RM_SET_SRC, permit, sequence 10 Match clauses: From cdb5f045a110976ede6f3d89db1726bbd8964503 Mon Sep 17 00:00:00 2001 From: Akhilesh Samineni <47657796+AkhileshSamineni@users.noreply.github.com> Date: Sat, 22 Feb 2020 00:56:25 +0530 Subject: [PATCH 006/111] Multi-Db changes for NAT feature. (#818) --- config/nat.py | 6 +++--- scripts/natclear | 2 +- scripts/natshow | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/config/nat.py b/config/nat.py index 1c30aa80d4..762252eea8 100644 --- a/config/nat.py +++ b/config/nat.py @@ -283,7 +283,7 @@ def add_basic(ctx, global_ip, local_ip, nat_type, twice_nat_id): ctx.fail("Given entry is overlapping with existing Dynamic entry !!") if entryFound is False: - counters_db = SonicV2Connector(host="127.0.0.1") + counters_db = SonicV2Connector() counters_db.connect(counters_db.COUNTERS_DB) snat_entries = 0 max_entries = 0 @@ -363,7 +363,7 @@ def add_tcp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Given entry is overlapping with existing NAT entry !!") if entryFound is False: - counters_db = SonicV2Connector(host="127.0.0.1") + counters_db = SonicV2Connector() counters_db.connect(counters_db.COUNTERS_DB) snat_entries = 0 max_entries = 0 @@ -443,7 +443,7 @@ def add_udp(ctx, global_ip, global_port, local_ip, local_port, nat_type, twice_n ctx.fail("Given entry is overlapping with existing NAT entry !!") if entryFound is False: - counters_db = SonicV2Connector(host="127.0.0.1") + counters_db = SonicV2Connector() counters_db.connect(counters_db.COUNTERS_DB) snat_entries = 0 max_entries = 0 diff --git a/scripts/natclear b/scripts/natclear index 76cd394f50..be4b2060e3 100644 --- a/scripts/natclear +++ b/scripts/natclear @@ 
-22,7 +22,7 @@ class NatClear(object): def __init__(self): super(NatClear,self).__init__() - self.db = SonicV2Connector(host="127.0.0.1") + self.db = SonicV2Connector() self.db.connect(self.db.APPL_DB) return diff --git a/scripts/natshow b/scripts/natshow index d0dc753702..3d810d6aae 100644 --- a/scripts/natshow +++ b/scripts/natshow @@ -69,9 +69,9 @@ class NatShow(object): def __init__(self): super(NatShow,self).__init__() - self.asic_db = SonicV2Connector(host="127.0.0.1") - self.appl_db = SonicV2Connector(host="127.0.0.1") - self.counters_db = SonicV2Connector(host="127.0.0.1") + self.asic_db = SonicV2Connector() + self.appl_db = SonicV2Connector() + self.counters_db = SonicV2Connector() return def fetch_count(self): From e40192f86a7539cb4f5ec8c4a0115c24138e045b Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Sun, 23 Feb 2020 13:54:09 -0800 Subject: [PATCH 007/111] [reboot] make sure the reboot happens even if platform reboot failed (#819) * [reboot] make sure the reboot happens even if platform reboot failed Platform reboot tool could fail if the platform driver didn't load properly. In this case, a reboot is required to recover the device. We need to make sure the reboot tool is robust. Signed-off-by: Ying Xie * Rewording * add log message --- scripts/reboot | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index 131aba24ec..15c936bc00 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -134,11 +134,19 @@ fi if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then VERBOSE=yes debug "Rebooting with platform ${PLATFORM} specific tool ..." exec ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} $@ -else - # If no platform-specific reboot tool, just run /sbin/reboot - exec /sbin/reboot $@ + + # There are a couple reasons execution reaches here: + # + # 1. The vendor platform reboot returned after scheduled the platform specific reboot. + # This is a vendor platform reboot code bug but it happens. + # 2. 
The vendor platform reboot failed. e.g. due to platform driver didn't load properly. + # + # As result if the reboot script reaches here. We should make the reboot happen. + # Sleep 1 second before calling /sbin/reboot to accommodate situation #1 above. + sleep 1 + + VERBOSE=yes debug "Platform specific reboot failed!" >&2 fi -# Should never reach here -VERBOSE=yes debug "Reboot failed!" >&2 -exit 1 +VERBOSE=yes debug "Issuing OS-level reboot ..." >&2 +exec /sbin/reboot $@ From 78b7235550ae775fc25cce5d2db7a1cfd82a855e Mon Sep 17 00:00:00 2001 From: shine4chen <37530989+shine4chen@users.noreply.github.com> Date: Mon, 24 Feb 2020 14:48:44 +0800 Subject: [PATCH 008/111] add support for MCLAG (#453) * add support for MCLAG Signed-off-by: shine.chen * add warm-reboot support for ICCPd Signed-off-by: shine.chen * ensure iccpd is there before stop iccpd Signed-off-by: shine.chen * fix service function * remove unused comment * refactor code according to feature management mechanism Signed-off-by: shine.chen --- config/main.py | 41 +++++++++++++++++++++++++++++++++++++++++ scripts/fast-reboot | 18 ++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/config/main.py b/config/main.py index a269420f10..cf18fc7e99 100755 --- a/config/main.py +++ b/config/main.py @@ -409,6 +409,14 @@ def _abort_if_false(ctx, param, value): if not value: ctx.abort() +def _get_optional_services(): + config_db = ConfigDBConnector() + config_db.connect() + optional_services_dict = config_db.get_table('FEATURE') + if not optional_services_dict: + return None + return optional_services_dict.keys() + def _stop_services(): # on Mellanox platform pmon is stopped by syncd services_to_stop = [ @@ -439,6 +447,17 @@ def _stop_services(): log_error("Stopping {} failed with error {}".format(service, e)) raise + # For optional services they don't start by default + for service in _get_optional_services(): + (out, err) = run_command("systemctl status {}".format(service), return_output = True) + if 
not err and 'Active: active (running)' in out: + try: + click.echo("Stopping service {} ...".format(service)) + run_command("systemctl stop {}".format(service)) + except SystemExit as e: + log_error("Stopping {} failed with error {}".format(service, e)) + raise + def _reset_failed_services(): services_to_reset = [ 'bgp', @@ -474,6 +493,17 @@ def _reset_failed_services(): log_error("Failed to reset failed status for service {}".format(service)) raise + # For optional services they don't start by default + for service in _get_optional_services(): + (out, err) = run_command("systemctl is-enabled {}".format(service), return_output = True) + if not err and 'enabled' in out: + try: + click.echo("Resetting failed status for service {} ...".format(service)) + run_command("systemctl reset-failed {}".format(service)) + except SystemExit as e: + log_error("Failed to reset failed status for service {}".format(service)) + raise + def _restart_services(): # on Mellanox platform pmon is started by syncd services_to_restart = [ @@ -508,6 +538,17 @@ def _restart_services(): log_error("Restart {} failed with error {}".format(service, e)) raise + # For optional services they don't start by default + for service in _get_optional_services(): + (out, err) = run_command("systemctl is-enabled {}".format(service), return_output = True) + if not err and 'enabled' in out: + try: + click.echo("Restarting service {} ...".format(service)) + run_command("systemctl restart {}".format(service)) + except SystemExit as e: + log_error("Restart {} failed with error {}".format(service, e)) + raise + def is_ipaddress(val): """ Validate if an entry is a valid IP """ if not val: diff --git a/scripts/fast-reboot b/scripts/fast-reboot index ac33bf85da..ae9a1382b5 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -430,6 +430,24 @@ debug "Stopped bgp ..." docker kill lldp &> /dev/null || debug "Docker lldp is not running ($?) ..." 
systemctl stop lldp +if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + if echo $(docker ps) | grep -q iccpd; then + docker kill iccpd > /dev/null || [ $? == 1 ] + fi +fi + +# Stop iccpd gracefully +if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then + if echo $(docker ps) | grep -q iccpd; then + debug "Stopping iccpd ..." + # Send USR1 signal to iccpd to stop it + # It will prepare iccpd for warm-reboot + # Note: We must send USR1 signal before syncd, or some state of iccpd maybe lost + docker exec -i iccpd pkill -USR1 iccpd || [ $? == 1 ] > /dev/null + debug "Stopped iccpd ..." + fi +fi + if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then # Kill teamd processes inside of teamd container with SIGUSR2 to allow them to send last LACP frames # We call `docker kill teamd` to ensure the container stops as quickly as possible, From 2eed75db4a4973d034bdce35fa8aa1cc147cb0fa Mon Sep 17 00:00:00 2001 From: Dong Zhang <41927498+dzhangalibaba@users.noreply.github.com> Date: Fri, 28 Feb 2020 13:42:42 -0800 Subject: [PATCH 009/111] [MultiDB] sonic-utilities - replace redis-cli/redis-dump with sonic-db-cli/sonic-db-dump (#810) * [MultiDB] sonic-utilities - replace redis-cli/redis-dump with sonic-db-cli/sonic-db-dump * only accept upper and underscore to prevent injection * quotation on db_name --- crm/main.py | 5 +++++ scripts/aclshow | 1 - scripts/fast-reboot | 18 +++++++++--------- scripts/generate_dump | 18 ++++++++---------- scripts/route_check_test.sh | 12 ++++++------ 5 files changed, 28 insertions(+), 26 deletions(-) diff --git a/crm/main.py b/crm/main.py index 696d7fdedf..241362ed13 100644 --- a/crm/main.py +++ b/crm/main.py @@ -124,6 +124,11 @@ def show_acl_table_resources(self): header = ("Table ID", "Resource Name", "Used Count", "Available Count") # Retrieve all ACL table keys from CRM:ACL_TABLE_STATS + # TODO + # Volodymyr is working on refactoring codes to access redis database via redis-py or swsssdk + # we should avoid using 'keys' 
operation via redis-cli or sonic-db-cli + # there would be an issue when KEY in database contains space or '\n' + # for loop on the non-tty 'keys' output will take the space or `\n` as seperator when parsing the element proc = Popen("docker exec -i database redis-cli --raw -n 2 KEYS *CRM:ACL_TABLE_STATS*", stdout=PIPE, stderr=PIPE, shell=True) out, err = proc.communicate() diff --git a/scripts/aclshow b/scripts/aclshow index 5eb9cb0d8c..a574a2325f 100755 --- a/scripts/aclshow +++ b/scripts/aclshow @@ -131,7 +131,6 @@ class AclStat(object): """ Get ACL counters from the DB """ - acl_counters_cmd = "docker exec -it database redis-cli --csv -n 2 hgetall COUNTERS:" counters_cnt = len(self.acl_rules) # num of counters should be the same as rules for table, rule in self.acl_rules.keys(): cnt_props = lowercase_keys(self.db.get_all(self.db.COUNTERS_DB, "COUNTERS:%s:%s" % (table, rule))) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index ae9a1382b5..66309fa85e 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -120,7 +120,7 @@ function init_warm_reboot_states() # the current DB contents will likely mark warm reboot is done. # Clear these states so that the next boot up image won't get confused. if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then - redis-cli -n 6 eval " + sonic-db-cli STATE_DB eval " for _, key in ipairs(redis.call('keys', 'WARM_RESTART_TABLE|*')) do redis.call('hdel', key, 'state') end @@ -132,11 +132,11 @@ function initialize_pre_shutdown() { debug "Initialize pre-shutdown ..." 
TABLE="WARM_RESTART_TABLE|warm-shutdown" - RESTORE_COUNT=`/usr/bin/redis-cli -n 6 hget "${TABLE}" restore_count` + RESTORE_COUNT=`sonic-db-cli STATE_DB hget "${TABLE}" restore_count` if [[ -z "$RESTORE_COUNT" ]]; then - /usr/bin/redis-cli -n 6 hset "${TABLE}" "restore_count" "0" > /dev/null + sonic-db-cli STATE_DB hset "${TABLE}" "restore_count" "0" > /dev/null fi - /usr/bin/redis-cli -n 6 hset "${TABLE}" "state" "requesting" > /dev/null + sonic-db-cli STATE_DB hset "${TABLE}" "state" "requesting" > /dev/null } function request_pre_shutdown() @@ -160,7 +160,7 @@ function wait_for_pre_shutdown_complete_or_fail() while [[ ${waitcount} -lt 600 ]]; do # timeout doesn't work with -i option of "docker exec". Therefore we have # to invoke docker exec directly below. - STATE=`timeout 5s docker exec database redis-cli -n 6 hget "${TABLE}" state; if [[ $? == 124 ]]; then echo "timed out"; fi` + STATE=`timeout 5s sonic-db-cli STATE_DB hget "${TABLE}" state; if [[ $? == 124 ]]; then echo "timed out"; fi` if [[ x"${STATE}" == x"timed out" ]]; then waitcount+=50 @@ -190,7 +190,7 @@ function backup_database() # Dump redis content to a file 'dump.rdb' in warmboot directory mkdir -p $WARM_DIR # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|* - redis-cli -n 6 eval " + sonic-db-cli STATE_DB eval " for _, k in ipairs(redis.call('keys', '*')) do if not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ and not string.match(k, 'MIRROR_SESSION_TABLE|') \ @@ -481,9 +481,9 @@ if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; t # Warm reboot: dump state to host disk if [[ "$REBOOT_TYPE" = "fastfast-reboot" ]]; then - redis-cli -n 1 FLUSHDB > /dev/null - redis-cli -n 2 FLUSHDB > /dev/null - redis-cli -n 5 FLUSHDB > /dev/null + sonic-db-cli ASIC_DB FLUSHDB > /dev/null + sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null + sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null fi # TODO: backup_database 
preserves FDB_TABLE diff --git a/scripts/generate_dump b/scripts/generate_dump index 3b3667e6dc..132d78f3c5 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -190,15 +190,13 @@ save_proc() { ############################################################################### # Dumps all fields and values from given Redis DB. # Arguments: -# DB id: id of DB for redis-cli # DB name: filename to which output will be saved # Returns: # None ############################################################################### save_redis() { - local db=$1 - local db_name=$2 - save_cmd "redis-dump -d $db -s /var/run/redis/redis.sock -y" "$db_name.json" + local db_name=$1 + save_cmd "sonic-db-dump -n '$db_name' -y" "$db_name.json" } ############################################################################### @@ -372,12 +370,12 @@ main() { save_nat_info - save_redis "0" "APP_DB" - save_redis "1" "ASIC_DB" - save_redis "2" "COUNTERS_DB" - save_redis "4" "CONFIG_DB" - save_redis "5" "FLEX_COUNTER_DB" - save_redis "6" "STATE_DB" + save_redis "APPL_DB" + save_redis "ASIC_DB" + save_redis "COUNTERS_DB" + save_redis "CONFIG_DB" + save_redis "FLEX_COUNTER_DB" + save_redis "STATE_DB" save_cmd "docker ps -a" "docker.ps" save_cmd "docker top pmon" "docker.pmon" diff --git a/scripts/route_check_test.sh b/scripts/route_check_test.sh index eb6f907560..505253863e 100755 --- a/scripts/route_check_test.sh +++ b/scripts/route_check_test.sh @@ -2,20 +2,20 @@ # add a route, interface & route-entry to simulate error # -redis-cli -n 0 hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" +sonic-db-cli APPL_DB hmset "ROUTE_TABLE:20c0:d9b8:99:80::/64" "nexthop" "fc00::72,fc00::76,fc00::7a,fc00::7e" "ifname" "PortChannel01,PortChannel02,PortChannel03,PortChannel04" -redis-cli -n 1 hmset 
"ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" +sonic-db-cli ASIC_DB hmset "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" "SAI_ROUTE_ENTRY_ATTR_NEXT_HOP_ID" "oid:0x5000000000614" -redis-cli -n 0 hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" +sonic-db-cli APPL_DB hmset "INTF_TABLE:PortChannel01:10.0.0.99/31" "scope" "global" "family" "IPv4" echo "expect errors!\n------\nRunning Route Check...\n" ./route_check.py echo "return value: $?" -redis-cli -n 0 del "ROUTE_TABLE:20c0:d9b8:99:80::/64" -redis-cli -n 1 del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" -redis-cli -n 0 del "INTF_TABLE:PortChannel01:10.0.0.99/31" +sonic-db-cli APPL_DB del "ROUTE_TABLE:20c0:d9b8:99:80::/64" +sonic-db-cli ASIC_DB del "ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY:{\"dest\":\"192.193.120.255/25\",\"switch_id\":\"oid:0x21000000000000\",\"vr\":\"oid:0x3000000000022\"}" +sonic-db-cli APPL_DB del "INTF_TABLE:PortChannel01:10.0.0.99/31" echo "expect success!\n------\nRunning Route Check...\n" From 6cdc5be6ddb41954aeb5717dabc55c4e727940c5 Mon Sep 17 00:00:00 2001 From: Andriy Moroz <22394886+andriymoroz-mlnx@users.noreply.github.com> Date: Sun, 1 Mar 2020 14:39:56 +0200 Subject: [PATCH 010/111] Update config/show to include PFC Watchdog commands (#736) * Add pfcwd utility to the config/show CLI Signed-off-by: Andriy Moroz * Add some more commands Signed-off-by: Andriy Moroz * [doc] Add start_default description Signed-off-by: Andriy Moroz --- config/main.py | 85 ++++++++++++++++++++++++++++++++++++++ doc/Command-Reference.md | 88 ++++++++++++++++++++++++++++++++++++++++ show/main.py | 24 +++++++++++ 3 files changed, 197 
insertions(+) diff --git a/config/main.py b/config/main.py index cf18fc7e99..a525415af5 100755 --- a/config/main.py +++ b/config/main.py @@ -843,6 +843,91 @@ def remove(session_name): config_db.connect() config_db.set_entry("MIRROR_SESSION", session_name, None) +# +# 'pfcwd' group ('config pfcwd ...') +# +@config.group() +def pfcwd(): + """Configure pfc watchdog """ + pass + +@pfcwd.command() +@click.option('--action', '-a', type=click.Choice(['drop', 'forward', 'alert'])) +@click.option('--restoration-time', '-r', type=click.IntRange(100, 60000)) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.argument('ports', nargs=-1) +@click.argument('detection-time', type=click.IntRange(100, 5000)) +def start(action, restoration_time, ports, detection_time, verbose): + """ + Start PFC watchdog on port(s). To config all ports, use all as input. + + Example: + config pfcwd start --action drop ports all detection-time 400 --restoration-time 400 + """ + cmd = "pfcwd start" + + if action: + cmd += " --action {}".format(action) + + if ports: + ports = set(ports) - set(['ports', 'detection-time']) + cmd += " ports {}".format(' '.join(ports)) + + if detection_time: + cmd += " detection-time {}".format(detection_time) + + if restoration_time: + cmd += " --restoration-time {}".format(restoration_time) + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def stop(verbose): + """ Stop PFC watchdog """ + + cmd = "pfcwd stop" + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.argument('poll_interval', type=click.IntRange(100, 3000)) +def interval(poll_interval, verbose): + """ Set PFC watchdog counter polling interval (ms) """ + + cmd = "pfcwd interval {}".format(poll_interval) + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, 
help="Enable verbose output") +@click.argument('counter_poll', type=click.Choice(['enable', 'disable'])) +def counter_poll(counter_poll, verbose): + """ Enable/disable counter polling """ + + cmd = "pfcwd counter_poll {}".format(counter_poll) + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.argument('big_red_switch', type=click.Choice(['enable', 'disable'])) +def big_red_switch(big_red_switch, verbose): + """ Enable/disable BIG_RED_SWITCH mode """ + + cmd = "pfcwd big_red_switch {}".format(big_red_switch) + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def start_default(verbose): + """ Start PFC WD by default configurations """ + + cmd = "pfcwd start_default" + + run_command(cmd, display_cmd=verbose) # # 'qos' group ('config qos ...') diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 8fb8b23eb0..c10d5095e2 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -73,6 +73,7 @@ * [NTP](#ntp) * [NTP show commands](#ntp-show-commands) * [NTP config commands](#ntp-config-commands) +* [PFC Watchdog Commands](#pfc-watchdog-commands) * [Platform Component Firmware](#platform-component-firmware) * [Platform Component Firmware show commands](#platform-component-firmware-show-commands) * [Platform Component Firmware config commands](#platform-component-firmware-config-commands) @@ -3784,6 +3785,93 @@ This command is used to delete a configured NTP server IP address. 
Go Back To [Beginning of the document](#) or [Beginning of this section](#NTP) +# PFC Watchdog Commands +Detailed description of the PFC Watchdog could be fount on the [this wiki page](https://github.com/Azure/SONiC/wiki/PFC-Watchdog) + +**config pfcwd start \** + +This command starts PFC Watchdog + +- Usage: + ``` + config pfcwd start --action drop ports all detection-time 400 --restoration-time 400 + config pfcwd start --action forward ports Ethernet0 Ethernet8 detection-time 400 + ``` + +**config pfcwd stop** + +This command stops PFC Watchdog + +- Usage: + ``` + config pfcwd stop + ``` + +**config pfcwd interval \** + +This command sets PFC Watchdog counter polling interval (in ms) + +- Usage: + ``` + config pfcwd interval 200 + ``` + +**config pfcwd counter_poll \** + +This command enables or disables PFCWD related counters polling + +- Usage: + ``` + config pfcwd counter_poll disable + ``` + +**config pfcwd big_red_switch \** + +This command enables or disables PFCWD's "BIG RED SWITCH"(BRS). After enabling BRS PFC Watchdog will be activated on all ports/queues it is configured for no matter whether the storm was detected or not + +- Usage: + ``` + config pfcwd big_red_switch enable + ``` + +**config pfcwd start_default** + +This command starts PFC Watchdog with the default settings. + +- Usage: + ``` + config pfcwd start_default + ``` + +Default values are the following: + + - detection time - 200ms + - restoration time - 200ms + - polling interval - 200ms + - action - 'drop' + +Additionally if number of ports in the system exceeds 32, all times will be multiplied by roughly /32. 
+ + +**show pfcwd config** + +This command shows current PFC Watchdog configuration + +- Usage: + ``` + show pfcwd config + ``` + +**show pfcwd stats** + +This command shows current PFC Watchdog statistics (storms detected, packets dropped, etc) + +- Usage: + ``` + show pfcwd stats + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#pfc-watchdog-commands) ## Platform Component Firmware diff --git a/show/main.py b/show/main.py index 4a52306044..c5add8729e 100755 --- a/show/main.py +++ b/show/main.py @@ -1043,6 +1043,30 @@ def counters(verbose): run_command(cmd, display_cmd=verbose) +# 'pfcwd' subcommand ("show pfcwd...") +@cli.group(cls=AliasedGroup, default_if_no_args=False) +def pfcwd(): + """Show details of the pfc watchdog """ + pass + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def config(verbose): + """Show pfc watchdog config""" + + cmd = "pfcwd show config" + + run_command(cmd, display_cmd=verbose) + +@pfcwd.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def stats(verbose): + """Show pfc watchdog stats""" + + cmd = "pfcwd show stats" + + run_command(cmd, display_cmd=verbose) + # 'naming_mode' subcommand ("show interfaces naming_mode") @interfaces.command() @click.option('--verbose', is_flag=True, help="Enable verbose output") From fe1216d313ec5154405ef49bd371723e639c1e13 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Sun, 1 Mar 2020 15:22:15 +0200 Subject: [PATCH 011/111] [fwutil]: Set default socket timeout for FW download to 30 sec. 
(#821) Signed-off-by: Nazarii Hnydyn --- fwutil/lib.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/fwutil/lib.py b/fwutil/lib.py index 3f6ecba4f3..60377c283a 100755 --- a/fwutil/lib.py +++ b/fwutil/lib.py @@ -9,6 +9,7 @@ import click import os import json + import socket import urllib import subprocess import sonic_device_util @@ -45,7 +46,8 @@ class URL(object): PB_INFO_SEPARATOR = " | " PB_FULL_TERMINAL_WIDTH = 0 - TMP_PATH = "/tmp" + DOWNLOAD_TIMEOUT = 30 + DOWNLOAD_PATH_TEMPLATE = "/tmp/{}" def __init__(self, url): self.__url = url @@ -115,13 +117,21 @@ def retrieve(self): if not extension: raise RuntimeError("Filename is malformed: did not find an extension") + default_timeout = socket.getdefaulttimeout() + socket.setdefaulttimeout(self.DOWNLOAD_TIMEOUT) + try: filename, headers = urllib.urlretrieve( self.__url, - "{}/{}".format(self.TMP_PATH, basename), + self.DOWNLOAD_PATH_TEMPLATE.format(basename), self.__reporthook ) + except: + if os.path.exists(self.DOWNLOAD_PATH_TEMPLATE.format(basename)): + os.remove(self.DOWNLOAD_PATH_TEMPLATE.format(basename)) + raise finally: + socket.setdefaulttimeout(default_timeout) self.__pb_reset() return filename, headers From 7f36545386030fe74aa9893121cde57124faae1c Mon Sep 17 00:00:00 2001 From: byu343 Date: Thu, 12 Mar 2020 12:02:19 -0700 Subject: [PATCH 012/111] Add kdump support for Aboot platforms (#824) --- scripts/reboot | 19 +++-- scripts/sonic-kdump-config | 152 +++++++++++++------------------------ 2 files changed, 63 insertions(+), 108 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index 15c936bc00..434675bdbf 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -1,12 +1,5 @@ #!/bin/bash -# Reboot immediately if we run the kdump capture kernel -VMCORE_FILE=/proc/vmcore -if [ -e $VMCORE_FILE -a -s $VMCORE_FILE ]; then - debug "We have a /proc/vmcore, then we just kdump'ed" - /sbin/reboot -fi - REBOOT_USER=$(logname) REBOOT_TIME=$(date) PLATFORM=$(sonic-cfggen -H -v 
DEVICE_METADATA.localhost.platform) @@ -25,6 +18,18 @@ function debug() logger "$@" } +# Reboot immediately if we run the kdump capture kernel +VMCORE_FILE=/proc/vmcore +if [ -e $VMCORE_FILE -a -s $VMCORE_FILE ]; then + debug "We have a /proc/vmcore, then we just kdump'ed" + if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then + VERBOSE=yes debug "Rebooting with platform ${PLATFORM} specific tool ..." + exec ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} + else + /sbin/reboot + fi +fi + function stop_sonic_services() { if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config index b7dee8a265..d785e59ab8 100755 --- a/scripts/sonic-kdump-config +++ b/scripts/sonic-kdump-config @@ -25,8 +25,10 @@ import subprocess import errno from swsssdk import ConfigDBConnector +aboot_cfg_template ="/host/image-%s/kernel-cmdline" grub_cfg = "/host/grub/grub.cfg" kdump_cfg = "/etc/default/kdump-tools" +machine_cfg = "/host/machine.conf" ## Same as print(), but output to stderr instead of stdout def print_err(*args): @@ -94,7 +96,7 @@ def get_next_image(): ## Search for Current/Next SONiC image in grub configuration # -# @param lines Lines read from grub.cfg file +# @param lines Lines read from grub.cfg/cmdline file # @param img String we are looking for ("loop=image...") # @return Index in lines array wehere we found the string def locate_image(lines, img): @@ -106,11 +108,11 @@ def locate_image(lines, img): pass return -1 -## Rewrite grub configuration file +## Rewrite grub/cmdline configuration file # -# @param lines Lines read from grub.cfg file -# @param fname Grub configuration file -def rewrite_grub_cfg(lines, fname): +# @param lines Lines read from grub/cmdline config file +# @param fname Grub/cmdline configuration file +def rewrite_cfg(lines, fname): fd = open(fname, "w") for x in lines: fd.writelines(x+'\n') @@ -254,50 +256,49 @@ def write_num_dumps(num_dumps): print_err("Error while writing KDUMP_NUM_DUMPS into %s" % 
kdump_cfg) sys.exit(1) -## Command: Enable kdump - Grub mode +## Enable kdump # # @param verbose If True, the function will display a few additinal information -# @return True is the grub configuration has changed, and False if it has not -def kdump_enable_grub(verbose, kdump_enabled, memory, num_dumps): +# @return True if the grub/cmdline cfg has changed, and False if it has not +def kdump_enable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file): - current_img = get_current_image(); if verbose: - print("Current image=[%s]" % current_img) + print("Enabling kdump for image=[%s]" % image) try: - lines = [line.rstrip('\n') for line in open(grub_cfg)] + lines = [line.rstrip('\n') for line in open(cmdline_file)] except Exception as exception: print_err(exception) sys.exit(1) - current_img_index = locate_image(lines, "loop=image-"+current_img) + img_index = locate_image(lines, "loop=image-"+image) if verbose: - print("Image index in grub.cfg=%d" % current_img_index) + print("Image index in %s=%d" % (cmdline_file, img_index)) changed = False crash_kernel_in_cmdline = search_for_crash_kernel_in_cmdline() if verbose: print("crash_kernel_in_cmdline=[%s]" % crash_kernel_in_cmdline) - curr_crash_kernel_mem = search_for_crash_kernel(lines[current_img_index]) + crash_kernel_mem = search_for_crash_kernel(lines[img_index]) if verbose: - print("curr_crash_kernel_mem=[%s]" % curr_crash_kernel_mem) - if curr_crash_kernel_mem == None: - lines[current_img_index] += " crashkernel=%s" % memory + print("crash_kernel_mem=[%s]" % crash_kernel_mem) + if crash_kernel_mem == None: + lines[img_index] += " crashkernel=%s" % memory changed = True if verbose: - print("Added to grub.cfg: [ crashkernel=%s ]" % memory) + print("Added to %s: [ crashkernel=%s ]" % (cmdline_file, memory)) else: - if curr_crash_kernel_mem == memory: - if curr_crash_kernel_mem == crash_kernel_in_cmdline: + if crash_kernel_mem == memory: + if crash_kernel_mem == crash_kernel_in_cmdline: print("kdump is 
already enabled") else: changed = True else: - lines[current_img_index] = lines[current_img_index].replace(curr_crash_kernel_mem, memory) + lines[img_index] = lines[img_index].replace(crash_kernel_mem, memory) changed = True if verbose: - print("Replace [%s] with [%s] in grub.cfg" % (curr_crash_kernel_mem, memory)) + print("Replace [%s] with [%s] in %s" % (crash_kernel_mem, memory, cmdline_file)) if changed: - rewrite_grub_cfg(lines, grub_cfg) + rewrite_cfg(lines, cmdline_file) write_use_kdump(1) @@ -306,8 +307,9 @@ def kdump_enable_grub(verbose, kdump_enabled, memory, num_dumps): ## Command: Enable kdump # # @param verbose If True, the function will display a few additinal information -# @return True is the grub configuration has changed, and False if it has not -def cmd_kdump_enable(verbose): +# @param image The image on which kdump settings are changed +# @return True if the grub/cmdline cfg has changed, and False if it has not +def cmd_kdump_enable(verbose, image=get_current_image()): kdump_enabled = get_kdump_administrative_mode() memory = get_kdump_memory() @@ -316,107 +318,51 @@ def cmd_kdump_enable(verbose): print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) if os.path.exists(grub_cfg): - return kdump_enable_grub(verbose, kdump_enabled, memory, num_dumps) + return kdump_enable(verbose, kdump_enabled, memory, num_dumps, image, grub_cfg) + elif open(machine_cfg, 'r').read().find('aboot_platform') >= 0: + aboot_cfg = aboot_cfg_template % image + return kdump_enable(verbose, kdump_enabled, memory, num_dumps, image, aboot_cfg) else: print("Feature not supported on this platform") - run_command("config kdump disable", use_shell=False); return False -## Command: Enable kdump on Next image only - Grub mode -# -# @param verbose If True, the function will display a few additional information -# @return True is the grub configuration has changed, and False if it has not -def kdump_config_next_grub(verbose, kdump_enabled, 
memory, num_dumps): - next_img = get_next_image(); - if verbose: - print("Next image=[%s]" % next_img) - try: - lines = [line.rstrip('\n') for line in open(grub_cfg)] - except Exception as exception: - print_err(exception) - sys.exit(1) - next_img_index = locate_image(lines, "loop=image-"+next_img) - if verbose: - print("Image index in grub.cfg=%d" % next_img_index) - - changed = False - crash_kernel_in_cmdline = search_for_crash_kernel_in_cmdline() - if verbose: - print("crash_kernel_in_cmdline=[%s]" % crash_kernel_in_cmdline) - curr_crash_kernel_mem = search_for_crash_kernel(lines[next_img_index]) - if verbose: - print("curr_crash_kernel_mem=[%s]" % curr_crash_kernel_mem) - if curr_crash_kernel_mem == None: - lines[next_img_index] += " crashkernel=%s" % memory - changed = True - if verbose: - print("Added to grub.cfg: [ crashkernel=%s ]" % memory) - else: - if curr_crash_kernel_mem == memory: - if curr_crash_kernel_mem == crash_kernel_in_cmdline: - print("kdump is already enabled") - else: - changed = True - else: - lines[next_img_index] = lines[next_img_index].replace(curr_crash_kernel_mem, memory) - changed = True - if verbose: - print("Replace [%s] with [%s] in grub.cfg" % (curr_crash_kernel_mem, memory)) - - if changed: - rewrite_grub_cfg(lines, grub_cfg) - - write_use_kdump(1) - - return changed - ## Command: Enable kdump on Next image only # # @param verbose If True, the function will display a few additional information -# @return True is the grub configuration has changed, and False if it has not +# @param image The image on which kdump settings are changed +# @return True if the grub/cmdline cfg has changed, and False if it has not def cmd_kdump_config_next(verbose): + return cmd_kdump_enable(verbose, image=get_next_image()) - kdump_enabled = get_kdump_administrative_mode() - memory = get_kdump_memory() - num_dumps = get_kdump_num_dumps() - if verbose: - print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" % (kdump_enabled, memory, num_dumps)) - - 
if os.path.exists(grub_cfg): - return kdump_config_next_grub(verbose, kdump_enabled, memory, num_dumps) - else: - return False - -## Command: Disable kdump - Grub mode +## Disable kdump # # @param verbose If True, the function will display a few additional information -def kdump_disable_grub(verbose, kdump_enabled, memory, num_dumps): +def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file): write_use_kdump(0) - current_img = get_current_image(); if verbose: - print("Current image=[%s]\n" % current_img) - lines = [line.rstrip('\n') for line in open(grub_cfg)] - current_img_index = locate_image(lines, "loop=image-"+current_img) + print("Disabling kdump for image=[%s]\n" % image) + lines = [line.rstrip('\n') for line in open(cmdline_file)] + img_index = locate_image(lines, "loop=image-"+image) changed = False - curr_crash_kernel_mem = search_for_crash_kernel(lines[current_img_index]) - if curr_crash_kernel_mem == None: + crash_kernel_mem = search_for_crash_kernel(lines[img_index]) + if crash_kernel_mem == None: print("kdump is already disabled") else: - lines[current_img_index] = lines[current_img_index].replace("crashkernel="+curr_crash_kernel_mem, "") + lines[img_index] = lines[img_index].replace("crashkernel="+crash_kernel_mem, "") changed = True if verbose: - print("Removed [%s] in grub.cfg" % ("crashkernel="+curr_crash_kernel_mem)) + print("Removed [%s] in %s" % ("crashkernel="+crash_kernel_mem, cmdline_file)) if changed: - rewrite_grub_cfg(lines, grub_cfg) - + rewrite_cfg(lines, cmdline_file) ## Command: Disable kdump # # @param verbose If True, the function will display a few additional information -def cmd_kdump_disable(verbose): +# @param image The image on which kdump settings are changed +def cmd_kdump_disable(verbose, image=get_current_image()): kdump_enabled = get_kdump_administrative_mode() memory = get_kdump_memory() @@ -425,8 +371,12 @@ def cmd_kdump_disable(verbose): print("configDB: kdump_enabled=%d memory=[%s] num_nums=%d" 
% (kdump_enabled, memory, num_dumps)) if os.path.exists(grub_cfg): - return kdump_disable_grub(verbose, kdump_enabled, memory, num_dumps) + return kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, grub_cfg) + elif open(machine_cfg, 'r').read().find('aboot_platform') >= 0: + aboot_cfg = aboot_cfg_template % image + return kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, aboot_cfg) else: + print("Feature not supported on this platform") return False ## Command: Set / Get memory From 46ae40fc3ff600f922b43420312324c87f35ade6 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Tue, 17 Mar 2020 23:31:47 +0800 Subject: [PATCH 013/111] [Mellanox] add document for thermal control related cli (#832) * [Mellanox] add document for thermal control related cli * Update review comments thermal zones -> thermal sensors * Make example better --- doc/Command-Reference.md | 80 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index c10d5095e2..dd8f9f9b8e 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -708,6 +708,86 @@ This command displays the status of the device's power supply units PSU 2 OK ``` +**show platform fan** + +This command displays the status of the device's fans + +- Usage: + ``` + show platform fan + ``` + +- Example: + ``` + admin@sonic:~$ show platform fan + FAN Speed Direction Presence Status Timestamp + ----------- -------- ----------- ---------- -------- ----------------- + fan1 34% intake Present OK 20200302 06:58:56 + fan2 43% intake Present OK 20200302 06:58:56 + fan3 38% intake Present OK 20200302 06:58:56 + fan4 49% intake Present OK 20200302 06:58:57 + fan5 38% exhaust Present OK 20200302 06:58:57 + fan6 48% exhaust Present OK 20200302 06:58:57 + fan7 39% exhaust Present OK 20200302 06:58:57 + fan8 48% exhaust Present OK 20200302 06:58:57 + ``` + +**show platform 
temperature** + +This command displays the status of the device's thermal sensors + +- Usage: + ``` + show platform temperature + ``` + +- Example: + ``` + admin@sonic:~$ show platform temperature + NAME Temperature High Th Low Th Crit High Th Crit Low Th Warning Timestamp + ---------------------- ------------- --------- -------- -------------- ------------- --------- ----------------- + Ambient ASIC Temp 37.0 100.0 N/A 120.0 N/A False 20200302 06:58:57 + Ambient Fan Side Temp 28.5 100.0 N/A 120.0 N/A False 20200302 06:58:57 + Ambient Port Side Temp 31.0 100.0 N/A 120.0 N/A False 20200302 06:58:57 + CPU Core 0 Temp 36.0 87.0 N/A 105.0 N/A False 20200302 06:59:57 + CPU Core 1 Temp 38.0 87.0 N/A 105.0 N/A False 20200302 06:59:57 + CPU Pack Temp 38.0 87.0 N/A 105.0 N/A False 20200302 06:59:57 + PSU-1 Temp 28.0 100.0 N/A 120.0 N/A False 20200302 06:59:58 + PSU-2 Temp 28.0 100.0 N/A 120.0 N/A False 20200302 06:59:58 + xSFP module 1 Temp 31.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 2 Temp 35.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 3 Temp 32.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 4 Temp 33.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 5 Temp 34.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 6 Temp 36.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 7 Temp 33.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 8 Temp 33.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 9 Temp 32.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 10 Temp 38.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 11 Temp 38.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 12 Temp 39.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 13 Temp 35.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 14 Temp 37.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 15 Temp 36.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 16 Temp 36.5 70.0 N/A 90.0 N/A 
False 20200302 06:59:57 + xSFP module 17 Temp 32.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 18 Temp 34.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 19 Temp 30.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 20 Temp 31.5 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 21 Temp 34.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 22 Temp 34.4 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 23 Temp 34.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 24 Temp 35.6 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 25 Temp 38.0 70.0 N/A 90.0 N/A False 20200302 06:59:57 + xSFP module 26 Temp 32.2 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 27 Temp 39.0 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 28 Temp 30.1 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 29 Temp 32.0 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 30 Temp 35.3 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 31 Temp 31.0 70.0 N/A 90.0 N/A False 20200302 06:59:58 + xSFP module 32 Temp 39.5 70.0 N/A 90.0 N/A False 20200302 06:59:58 + ``` + #### Transceivers Displays diagnostic monitoring information of the transceivers From 9563e3d366b897502839e48b2f5645521fceb785 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Tue, 17 Mar 2020 14:38:00 -0700 Subject: [PATCH 014/111] [intfutil] set speed to 0 when interface speed is not available (#839) This is not an issue with normal and correct configuration. The issue was exposed when there is an incorrect configuration, e.g. contain wrong port names. These wrong port names will still get populated to the app_db but will not have speed associated. Lack of speed entry will cause "show interface status" to throw exception. 
Signed-off-by: Ying Xie --- scripts/intfutil | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/intfutil b/scripts/intfutil index bad72f27c4..1794399890 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -276,7 +276,7 @@ def po_speed_dict(po_int_dict, appl_db): elif len(value) > 1: for intf in value: temp_speed = appl_db.get(appl_db.APPL_DB, "PORT_TABLE:" + intf, "speed") - temp_speed = int(temp_speed) + temp_speed = int(temp_speed) if temp_speed else 0 agg_speed_list.append(temp_speed) interface_speed = sum(agg_speed_list) interface_speed = str(interface_speed) From ce5e81acbc0ea41c9e691f3ef8a101d752cb3f26 Mon Sep 17 00:00:00 2001 From: lguohan Date: Wed, 18 Mar 2020 16:54:42 -0700 Subject: [PATCH 015/111] [config]: add syslog messages to config load_minigraph/reload (#843) add stoping/restarting services messages in syslog Signed-off-by: Guohan Lu --- config/main.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config/main.py b/config/main.py index a525415af5..f987c8774d 100755 --- a/config/main.py +++ b/config/main.py @@ -612,6 +612,7 @@ def reload(filename, yes, load_sysinfo): cfg_hwsku = cfg_hwsku.strip() #Stop services before config push + log_info("'reload' stopping services...") _stop_services() config_db = ConfigDBConnector() config_db.connect() @@ -637,6 +638,7 @@ def reload(filename, yes, load_sysinfo): # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them _reset_failed_services() + log_info("'reload' restarting services...") _restart_services() @config.command() @@ -681,6 +683,7 @@ def load_minigraph(): device_type = device_type.strip() #Stop services before config push + log_info("'load_minigraph' stopping services...") _stop_services() config_db = ConfigDBConnector() @@ -708,6 +711,7 @@ def load_minigraph(): # status from all services before we attempt to restart them _reset_failed_services() #FIXME: After config DB daemon is implemented, we'll no 
longer need to restart every service. + log_info("'load_minigraph' restarting services...") _restart_services() click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.") From 6af3dce23d49d5cdf3b0c010721da217dfb53cb8 Mon Sep 17 00:00:00 2001 From: "arheneus@marvell.com" <51254330+antony-rheneus@users.noreply.github.com> Date: Fri, 20 Mar 2020 10:14:27 +0530 Subject: [PATCH 016/111] [sonic_installer] Enable ARM64 arch (#811) Signed-off-by: Antony Rheneus --- sonic_installer/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index a3b2e81cb3..393055b3f6 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -26,7 +26,7 @@ BOOTLOADER_TYPE_GRUB = 'grub' BOOTLOADER_TYPE_UBOOT = 'uboot' ARCH = platform.machine() -BOOTLOADER = BOOTLOADER_TYPE_UBOOT if "arm" in ARCH else BOOTLOADER_TYPE_GRUB +BOOTLOADER = BOOTLOADER_TYPE_UBOOT if ("arm" in ARCH) or ("aarch64" in ARCH) else BOOTLOADER_TYPE_GRUB # # Helper functions From abdc68d12acf275944d80fefe2af323805b32c2a Mon Sep 17 00:00:00 2001 From: Kebo Liu Date: Sat, 21 Mar 2020 07:53:41 +0800 Subject: [PATCH 017/111] [db_migrator]Do DB migration for buffer pool size change on Mellanox platform (#833) * do DB migration for buffer pool size change with new SDK version * fix review comments enhace migration fail case * make migrator can work with warm reboot case * ehnahce the logic to cover more corner case simplify the way to generate new buffer configuration. 
* remove code to get info from config_db.json since it's not necessary 3800 platform need special buffer configuration --- scripts/db_migrator.py | 118 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 115 insertions(+), 3 deletions(-) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index eefc2cbb30..b610a35912 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -5,6 +5,10 @@ import argparse import syslog from swsssdk import ConfigDBConnector +import sonic_device_util +import os +import subprocess +import json SYSLOG_IDENTIFIER = 'db_migrator' @@ -35,7 +39,7 @@ def __init__(self, socket=None): none-zero values. build: sequentially increase within a minor version domain. """ - self.CURRENT_VERSION = 'version_1_0_2' + self.CURRENT_VERSION = 'version_1_0_3' self.TABLE_NAME = 'VERSIONS' self.TABLE_KEY = 'DATABASE' @@ -96,6 +100,100 @@ def migrate_interface_table(self): self.configDB.set_entry(table, key[0], data[key]) if_db.append(key[0]) + def mlnx_migrate_buffer_pool_size(self): + """ + On Mellanox platform the buffer pool size changed since + version with new SDK 4.3.3052, SONiC to SONiC update + from version with old SDK will be broken without migration. + This migration is specifically for Mellanox platform. 
+ """ + # Buffer pools defined in version 1_0_2 + buffer_pools = ['ingress_lossless_pool', 'egress_lossless_pool', 'ingress_lossy_pool', 'egress_lossy_pool'] + + # Old default buffer pool values on Mellanox platform + spc1_t0_default_value = [{'ingress_lossless_pool': '4194304'}, {'egress_lossless_pool': '16777152'}, {'ingress_lossy_pool': '7340032'}, {'egress_lossy_pool': '7340032'}] + spc1_t1_default_value = [{'ingress_lossless_pool': '2097152'}, {'egress_lossless_pool': '16777152'}, {'ingress_lossy_pool': '5242880'}, {'egress_lossy_pool': '5242880'}] + spc2_t0_default_value = [{'ingress_lossless_pool': '8224768'}, {'egress_lossless_pool': '35966016'}, {'ingress_lossy_pool': '8224768'}, {'egress_lossy_pool': '8224768'}] + spc2_t1_default_value = [{'ingress_lossless_pool': '12042240'}, {'egress_lossless_pool': '35966016'}, {'ingress_lossy_pool': '12042240'}, {'egress_lossy_pool': '12042240'}] + + # New default buffer pool configuration on Mellanox platform + spc1_t0_default_config = {"ingress_lossless_pool": { "size": "5029836", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "5029836", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "14024599", "type": "egress", "mode": "dynamic" }, + "egress_lossy_pool": {"size": "5029836", "type": "egress", "mode": "dynamic" } } + spc1_t1_default_config = {"ingress_lossless_pool": { "size": "2097100", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "2097100", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "14024599", "type": "egress", "mode": "dynamic" }, + "egress_lossy_pool": {"size": "2097100", "type": "egress", "mode": "dynamic" } } + spc2_t0_default_config = {"ingress_lossless_pool": { "size": "14983147", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "14983147", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "34340822", "type": "egress", "mode": "dynamic" }, + 
"egress_lossy_pool": {"size": "14983147", "type": "egress", "mode": "dynamic" } } + spc2_t1_default_config = {"ingress_lossless_pool": { "size": "9158635", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "9158635", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "34340822", "type": "egress", "mode": "dynamic" }, + "egress_lossy_pool": {"size": "9158635", "type": "egress", "mode": "dynamic" } } + # 3800 platform has gearbox installed so the buffer pool size is different with other Spectrum2 platform + spc2_3800_t0_default_config = {"ingress_lossless_pool": { "size": "28196784", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "28196784", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "34340832", "type": "egress", "mode": "dynamic" }, + "egress_lossy_pool": {"size": "28196784", "type": "egress", "mode": "dynamic" } } + spc2_3800_t1_default_config = {"ingress_lossless_pool": { "size": "17891280", "type": "ingress", "mode": "dynamic" }, + "ingress_lossy_pool": { "size": "17891280", "type": "ingress", "mode": "dynamic" }, + "egress_lossless_pool": { "size": "34340832", "type": "egress", "mode": "dynamic" }, + "egress_lossy_pool": {"size": "17891280", "type": "egress", "mode": "dynamic" } } + + # Try to get related info from DB + buffer_pool_conf = {} + device_data = self.configDB.get_table('DEVICE_METADATA') + if 'localhost' in device_data.keys(): + hwsku = device_data['localhost']['hwsku'] + platform = device_data['localhost']['platform'] + else: + log_error("Trying to get DEVICE_METADATA from DB but doesn't exist, skip migration") + return False + buffer_pool_conf = self.configDB.get_table('BUFFER_POOL') + + # Get current buffer pool configuration, only migrate configuration which + # with default values, if it's not default, leave it as is. 
+ pool_size_in_db_list = [] + pools_in_db = buffer_pool_conf.keys() + + # Buffer pool numbers is different with default, don't need migrate + if len(pools_in_db) != len(buffer_pools): + return True + + # If some buffer pool is not default ones, don't need migrate + for buffer_pool in buffer_pools: + if buffer_pool not in pools_in_db: + return True + pool_size_in_db_list.append({buffer_pool: buffer_pool_conf[buffer_pool]['size']}) + + # To check if the buffer pool size is equal to old default values + new_buffer_pool_conf = None + if pool_size_in_db_list == spc1_t0_default_value: + new_buffer_pool_conf = spc1_t0_default_config + elif pool_size_in_db_list == spc1_t1_default_value: + new_buffer_pool_conf = spc1_t1_default_config + elif pool_size_in_db_list == spc2_t0_default_value: + if platform == 'x86_64-mlnx_msn3800-r0': + new_buffer_pool_conf = spc2_3800_t0_default_config + else: + new_buffer_pool_conf = spc2_t0_default_config + elif pool_size_in_db_list == spc2_t1_default_value: + if platform == 'x86_64-mlnx_msn3800-r0': + new_buffer_pool_conf = spc2_3800_t1_default_config + else: + new_buffer_pool_conf = spc2_t1_default_config + else: + # It's not using default buffer pool configuration, no migration needed. + log_info("buffer pool size is not old default value, no need to migrate") + return True + # Migrate old buffer conf to latest. + for pool in buffer_pools: + self.configDB.set_entry('BUFFER_POOL', pool, new_buffer_pool_conf[pool]) + log_info("Successfully migrate mlnx buffer pool size to the latest.") + return True def version_unknown(self): """ @@ -127,13 +225,27 @@ def version_1_0_1(self): self.migrate_interface_table() self.set_version('version_1_0_2') - return None + return 'version_1_0_2' def version_1_0_2(self): """ - Current latest version. Nothing to do here. + Version 1_0_2. 
""" log_info('Handling version_1_0_2') + # Check ASIC type, if Mellanox platform then need DB migration + version_info = sonic_device_util.get_sonic_version_info() + if version_info['asic_type'] == "mellanox": + if self.mlnx_migrate_buffer_pool_size(): + self.set_version('version_1_0_3') + else: + self.set_version('version_1_0_3') + return None + + def version_1_0_3(self): + """ + Current latest version. Nothing to do here. + """ + log_info('Handling version_1_0_3') return None From f9352df2f094dfd4c1c7b750675b83d7375a375d Mon Sep 17 00:00:00 2001 From: lguohan Date: Sat, 21 Mar 2020 18:18:53 -0700 Subject: [PATCH 018/111] explicitly specify command with underscores (#846) Starting click 7.0. The default behavior of a command with under scores will be replace with dashes. this is to address the above default behavior change, so that the command remains the same. more details can be found: https://github.com/pallets/click/issues/1123 --- config/main.py | 4 ++-- pfcwd/main.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/config/main.py b/config/main.py index f987c8774d..820d4085e6 100755 --- a/config/main.py +++ b/config/main.py @@ -641,7 +641,7 @@ def reload(filename, yes, load_sysinfo): log_info("'reload' restarting services...") _restart_services() -@config.command() +@config.command("load_mgmt_config") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload mgmt config?') @click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True)) @@ -665,7 +665,7 @@ def load_mgmt_config(filename): run_command(command, display_cmd=True, ignore_error=True) click.echo("Please note loaded setting will be lost after system reboot. 
To preserve setting, run `config save`.") -@config.command() +@config.command("load_minigraph") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') def load_minigraph(): diff --git a/pfcwd/main.py b/pfcwd/main.py index 880a5f33f8..3a27e993a8 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -221,7 +221,7 @@ def stop(ports): configdb.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, port, None) # Set WD default configuration on server facing ports when enable flag is on -@cli.command() +@cli.command("start_default") def start_default(): """ Start PFC WD by default configurations """ if os.geteuid() != 0: From c6e23da6c9fa58ae973708830705c9159caab9a7 Mon Sep 17 00:00:00 2001 From: padmanarayana Date: Sat, 21 Mar 2020 18:25:42 -0700 Subject: [PATCH 019/111] Update Command Reference with sFlow section (#841) --- doc/Command-Reference.md | 193 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 193 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index dd8f9f9b8e..c6f3fbfd63 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -86,6 +86,9 @@ * [PFC](#pfc) * [Queue And Priority-Group](#queue-and-priority-group) * [QoS config commands](#qos-config-commands) +* [sFlow](#sflow) + * [sFlow Show commands](#sflow-show-commands) + * [sFlow Config commands](#sflow-config-commands) * [Startup & Running Configuration](#startup--running-configuration) * [Startup Configuration](#startup-configuration) * [Running Configuration](#running-configuration) @@ -4568,6 +4571,196 @@ Some of the example QOS configurations that users can modify are given below. Go Back To [Beginning of the document](#) or [Beginning of this section](#qos) +## sFlow + +### sFlow Show commands + +**show sflow** + +This command displays the global sFlow configuration that includes the admin state, collectors, the Agent ID and counter polling interval. 
+ +- Usage: + ``` + show sflow + ``` + +- Example: + ``` + admin@sonic:~# show sflow + sFlow Global Information: + sFlow Admin State: up + sFlow Polling Interval: default + sFlow AgentID: lo + + 2 Collectors configured: + Name: collector_A IP addr: 10.11.46.2 UDP port: 6343 + Name: collector_lo IP addr: 127.0.0.1 UDP port: 6343 + ``` + + +**show sflow interface** + +This command displays the per-interface sflow admin status and the sampling rate. + +- Usage: + ``` + show sflow interface + ``` + +- Example: + ``` + admin@sonic:~# show sflow interface + + sFlow interface configurations + +-------------+---------------+-----------------+ + | Interface | Admin State | Sampling Rate | + +=============+===============+=================+ + | Ethernet0 | up | 4000 | + +-------------+---------------+-----------------+ + | Ethernet1 | up | 4000 | + +-------------+---------------+-----------------+ + ... + +-------------+---------------+-----------------+ + | Ethernet61 | up | 4000 | + +-------------+---------------+-----------------+ + | Ethernet62 | up | 4000 | + +-------------+---------------+-----------------+ + | Ethernet63 | up | 4000 | + +-------------+---------------+-----------------+ + + ``` + +### sFlow Config commands + +**config sflow collector add** + +This command is used to add a sFlow collector. Note that a maximum of 2 collectors is allowed. + +- Usage: + ``` + config sflow collector add [port ] + ``` + + - Parameters: + - collector-name: unique name of the sFlow collector + - ipv4-address : IP address of the collector in dotted decimal format for IPv4 + - ipv6-address : x: x: x: x::x format for IPv6 address of the collector (where :: notation specifies successive hexadecimal fields of zeros) + - port (OPTIONAL): specifies the UDP port of the collector (the range is from 0 to 65535. The default is 6343.) 
+ +- Example: + ``` + admin@sonic:~# sudo config sflow collector add collector_A 10.11.46.2 + ``` + +**config sflow collector del** + +This command is used to delete a sFlow collector with the given name. + +- Usage: + ``` + config sflow collector del + ``` + + - Parameters: + - collector-name: unique name of the sFlow collector + +- Example: + ``` + admin@sonic:~# sudo config sflow collector del collector_A + ``` + +**config sflow agent-id** + +This command is used to add/delete the sFlow agent-id. This setting is global (applicable to both collectors) and optional. Only a single agent-id is allowed. If agent-id is not specified (with this CLI), an appropriate IP that belongs to the switch is used as the agent-id based on some simple heuristics. + +- Usage: + ``` + config sflow agent-id + ``` + + - Parameters: + - interface-name: specify the interface name whose ipv4 or ipv6 address will be used as the agent-id in sFlow datagrams. + +- Example: + ``` + admin@sonic:~# sudo config sflow agent-id add lo + ``` + +**config sflow** + +Globally, sFlow is disabled by default. When sFlow is enabled globally, the sflow deamon is started and sampling will start on all interfaces which have sFlow enabled at the interface level (see “config sflow interface…”). When sflow is disabled globally, sampling is stopped on all relevant interfaces and sflow daemon is stopped. + +- Usage: + ``` + config sflow + ``` +- Example: + ``` + admin@sonic:~# sudo config sflow enable + ``` +**config sflow interface** + +Enable/disable sflow at an interface level. By default, sflow is enabled on all interfaces at the interface level. Use this command to explicitly disable sFlow for a specific interface. An interface is sampled if sflow is enabled globally as well as at the interface level. Note that this configuration deals only with sFlow flow samples and not counter samples. 
+ +- Usage: + ``` + config sflow interface + ``` + + - Parameters: + - interface-name: specify the interface for which sFlow flow samples have to be enabled/disabled. The “all” keyword is used as a convenience to enable/disable sflow at the interface level for all the interfaces. + +- Example: + ``` + admin@sonic:~# sudo config sflow interface disable Ethernet40 + ``` + +**config sflow interface sample-rate** + +Configure the sample-rate for a specific interface. + +The default sample rate for any interface is (ifSpeed / 1e6) where ifSpeed is in bits/sec. So, the default sample rate based on interface speed is: + + 1-in-1000 for a 1G link + 1-in-10,000 for a 10G link + 1-in-40,000 for a 40G link + 1-in-50,000 for a 50G link + 1-in-100,000 for a 100G link + +It is recommended not to change the defaults. This CLI is to be used only in case of exceptions (e.g., to set the sample-rate to the nearest power-of-2 if there are hardware restrictions in using the defaults) + +- Usage: + ``` + config sflow interface sample-rate + ``` + + - Parameters: + - interface-name: specify the interface for which the sampling rate value is to be set + - value: value is the average number of packets skipped before the sample is taken. "The sampling rate specifies random sampling probability as the ratio of packets observed to samples generated. For example a sampling rate of 256 specifies that, on average, 1 sample will be generated for every 256 packets observed." Valid range 256:8388608. + +- Example: + ``` + admin@sonic:~# sudo config sflow interface sample-rate Ethernet32 1000 + ``` +**config sflow polling-interval** + +This command is used to set the counter polling interval. Default is 20 seconds. + +- Usage: + ``` + config sflow polling-interval + ``` + + - Parameters: + - value: 0-300 seconds. 
Set polling-interval to 0 to disable counter polling + +- Example: + ``` + admin@sonic:~# sudo config sflow polling-interval 30 + ``` + + +Go Back To [Beginning of the document](#) or [Beginning of this section](#sflow) ## Startup & Running Configuration From fb4c00fd2dc5be461538309ef98d6e8ac86adcfd Mon Sep 17 00:00:00 2001 From: Mykola F <37578614+mykolaf@users.noreply.github.com> Date: Sun, 22 Mar 2020 03:32:08 +0200 Subject: [PATCH 020/111] [decode-syseeprom] fix getattribute check for sime platforms (#835) For some platforms which implement the eeprom plugin by inheriting from TlvInfoDecoder, the line t.getattr('read_eeprom_db', None) == None: results in an error: AttributeError: 'board' object has no attribute 'getattr' The cause for that is that TlvInfoDecoder inherits from object, and looks like for object and derived classes there is no __getattr__. Signed-off-by: Mykola Faryma --- scripts/decode-syseeprom | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 20decdd9c8..314c1407d1 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -55,7 +55,7 @@ def main(): # Currently, don't support eeprom db on Arista platform platforms_without_eeprom_db = ['arista', 'kvm'] if any(platform in platform_path for platform in platforms_without_eeprom_db)\ - or t.getattr('read_eeprom_db', None) == None: + or getattr(t, 'read_eeprom_db', None) == None: support_eeprom_db = False # From bd62a0addefaa74807aff4fb1fa45b9ef0e69841 Mon Sep 17 00:00:00 2001 From: byu343 Date: Sat, 21 Mar 2020 18:32:57 -0700 Subject: [PATCH 021/111] Fix kernel panic for irq after fast-reboot (#823) After the change in master branch updating SAI from 3.5.3.1m-25 to 3.7.3.2, we always found kernel panic after running fast-reboot command in testing SONiC with traffic. 
In the up path of fast-reboot, we can find warning messages like "unhandled irq 16 error" before kernel panic, which implies that some components are not properly closed in the down path. This fix will unload certain kernel modules by stopping opennsl before fast-reboot, which is suggested by BRCM. Note that another part of the fix is to add 'ExecStop=-/etc/init.d/opennsl-modules stop' to sonic-buildimage:platform/broadcom/saibcm-modules/systemd/opennsl-modules.service, which will be included in another pull request. --- scripts/fast-reboot | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 66309fa85e..02c035001b 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -529,6 +529,13 @@ then systemctl stop nps-modules-`uname -r`.service || debug "Ignore stopping nps service error $?" fi +# Stop opennsl modules for Broadcom platform +if [[ "$sonic_asic_type" = 'broadcom' ]]; +then + service_name=$(systemctl list-units --plain --no-pager --no-legend --type=service | grep opennsl | cut -f 1 -d' ') + systemctl stop "$service_name" +fi + # Update the reboot cause file to reflect that user issued this script # Upon next boot, the contents of this file will be used to determine the # cause of the previous reboot From 2f50ff16a4736db95fde5020c5a34dd9bf713db7 Mon Sep 17 00:00:00 2001 From: shine4chen <37530989+shine4chen@users.noreply.github.com> Date: Sun, 22 Mar 2020 09:37:29 +0800 Subject: [PATCH 022/111] return list for _get_optional_services() (#822) the return value of _get_optional_services() must be iterable This bug is imported from PR #453 Co-authored-by: shine.chen --- config/main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/config/main.py b/config/main.py index 820d4085e6..778fd429f4 100755 --- a/config/main.py +++ b/config/main.py @@ -413,8 +413,6 @@ def _get_optional_services(): config_db = ConfigDBConnector() config_db.connect() optional_services_dict = config_db.get_table('FEATURE') - if not 
optional_services_dict: - return None return optional_services_dict.keys() def _stop_services(): From 7cd85c742ac1c074a5f7e5e9914082c9e6ab173d Mon Sep 17 00:00:00 2001 From: lguohan Date: Sun, 22 Mar 2020 10:16:59 -0700 Subject: [PATCH 023/111] Revert "return list for _get_optional_services() (#822)" (#848) This reverts commit f1c79d553209ccdea1e64d76854cf3606461ccc3. --- config/main.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/config/main.py b/config/main.py index 778fd429f4..820d4085e6 100755 --- a/config/main.py +++ b/config/main.py @@ -413,6 +413,8 @@ def _get_optional_services(): config_db = ConfigDBConnector() config_db.connect() optional_services_dict = config_db.get_table('FEATURE') + if not optional_services_dict: + return None return optional_services_dict.keys() def _stop_services(): From 3d8ff33d1532a609d7c5fbc270ac0e21ee8f285b Mon Sep 17 00:00:00 2001 From: lguohan Date: Sun, 22 Mar 2020 10:19:39 -0700 Subject: [PATCH 024/111] Revert "add support for MCLAG (#453)" (#849) This reverts commit 8aea564f53d4a78bae7b6481ecb3167b541e8885. 
--- config/main.py | 41 ----------------------------------------- scripts/fast-reboot | 18 ------------------ 2 files changed, 59 deletions(-) diff --git a/config/main.py b/config/main.py index 820d4085e6..94ca98741f 100755 --- a/config/main.py +++ b/config/main.py @@ -409,14 +409,6 @@ def _abort_if_false(ctx, param, value): if not value: ctx.abort() -def _get_optional_services(): - config_db = ConfigDBConnector() - config_db.connect() - optional_services_dict = config_db.get_table('FEATURE') - if not optional_services_dict: - return None - return optional_services_dict.keys() - def _stop_services(): # on Mellanox platform pmon is stopped by syncd services_to_stop = [ @@ -447,17 +439,6 @@ def _stop_services(): log_error("Stopping {} failed with error {}".format(service, e)) raise - # For optional services they don't start by default - for service in _get_optional_services(): - (out, err) = run_command("systemctl status {}".format(service), return_output = True) - if not err and 'Active: active (running)' in out: - try: - click.echo("Stopping service {} ...".format(service)) - run_command("systemctl stop {}".format(service)) - except SystemExit as e: - log_error("Stopping {} failed with error {}".format(service, e)) - raise - def _reset_failed_services(): services_to_reset = [ 'bgp', @@ -493,17 +474,6 @@ def _reset_failed_services(): log_error("Failed to reset failed status for service {}".format(service)) raise - # For optional services they don't start by default - for service in _get_optional_services(): - (out, err) = run_command("systemctl is-enabled {}".format(service), return_output = True) - if not err and 'enabled' in out: - try: - click.echo("Resetting failed status for service {} ...".format(service)) - run_command("systemctl reset-failed {}".format(service)) - except SystemExit as e: - log_error("Failed to reset failed status for service {}".format(service)) - raise - def _restart_services(): # on Mellanox platform pmon is started by syncd 
services_to_restart = [ @@ -538,17 +508,6 @@ def _restart_services(): log_error("Restart {} failed with error {}".format(service, e)) raise - # For optional services they don't start by default - for service in _get_optional_services(): - (out, err) = run_command("systemctl is-enabled {}".format(service), return_output = True) - if not err and 'enabled' in out: - try: - click.echo("Restarting service {} ...".format(service)) - run_command("systemctl restart {}".format(service)) - except SystemExit as e: - log_error("Restart {} failed with error {}".format(service, e)) - raise - def is_ipaddress(val): """ Validate if an entry is a valid IP """ if not val: diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 02c035001b..faea099806 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -430,24 +430,6 @@ debug "Stopped bgp ..." docker kill lldp &> /dev/null || debug "Docker lldp is not running ($?) ..." systemctl stop lldp -if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - if echo $(docker ps) | grep -q iccpd; then - docker kill iccpd > /dev/null || [ $? == 1 ] - fi -fi - -# Stop iccpd gracefully -if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then - if echo $(docker ps) | grep -q iccpd; then - debug "Stopping iccpd ..." - # Send USR1 signal to iccpd to stop it - # It will prepare iccpd for warm-reboot - # Note: We must send USR1 signal before syncd, or some state of iccpd maybe lost - docker exec -i iccpd pkill -USR1 iccpd || [ $? == 1 ] > /dev/null - debug "Stopped iccpd ..." 
- fi -fi - if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then # Kill teamd processes inside of teamd container with SIGUSR2 to allow them to send last LACP frames # We call `docker kill teamd` to ensure the container stops as quickly as possible, From 66b20f10210b43168677d7589024f2dad9eebf9a Mon Sep 17 00:00:00 2001 From: Stephen Sun <5379172+stephenxs@users.noreply.github.com> Date: Tue, 24 Mar 2020 13:22:29 +0800 Subject: [PATCH 025/111] [config] Add sflow to _reset_failed_services (#850) --- config/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 94ca98741f..bb40a1901b 100755 --- a/config/main.py +++ b/config/main.py @@ -455,7 +455,8 @@ def _reset_failed_services(): 'swss', 'syncd', 'teamd', - 'nat' + 'nat', + 'sflow' ] generated_services_list = _get_sonic_generated_services() From 049c379c70d2a76673c3ccb6cba42ecc6b726fc9 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Tue, 24 Mar 2020 13:06:31 +0200 Subject: [PATCH 026/111] [fwutil]: Use overlay driver when mounting next image filesystem (#825) * [fwutil]: Use overlay driver when mounting next image filesystem. Signed-off-by: Nazarii Hnydyn * [fwutil]: Update command reference. Signed-off-by: Nazarii Hnydyn --- doc/Command-Reference.md | 2 +- fwutil/lib.py | 51 ++++++++++++++++++++++++++++++---------- fwutil/log.py | 3 +++ fwutil/main.py | 12 ++++++---- 4 files changed, 50 insertions(+), 18 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index c6f3fbfd63..5c41983cb0 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -4117,7 +4117,7 @@ Supported options: 2. -f|--force - install FW regardless the current version 3. 
-i|--image - update FW using current/next SONiC image -Note: the default option is --image=current +Note: the default option is --image=current (current/next values are taken from `sonic_installer list`) Go Back To [Beginning of the document](#) or [Beginning of this section](#platform-component-firmware) diff --git a/fwutil/lib.py b/fwutil/lib.py index 60377c283a..b065c9eefa 100755 --- a/fwutil/lib.py +++ b/fwutil/lib.py @@ -201,43 +201,67 @@ class SquashFs(object): OS_PREFIX = "SONiC-OS-" FS_PATH_TEMPLATE = "/host/image-{}/fs.squashfs" + FS_RW_TEMPLATE = "/host/image-{}/rw" + FS_WORK_TEMPLATE = "/host/image-{}/work" FS_MOUNTPOINT_TEMPLATE = "/tmp/image-{}-fs" - def __init__(self): - current_image = self.__get_current_image() - next_image = self.__get_next_image() - - if current_image == next_image: - raise RuntimeError("Next boot image is not set") + OVERLAY_MOUNTPOINT_TEMPLATE = "/tmp/image-{}-overlay" - image_stem = next_image.lstrip(self.OS_PREFIX) + def __init__(self): + image_stem = self.next_image.lstrip(self.OS_PREFIX) self.fs_path = self.FS_PATH_TEMPLATE.format(image_stem) + self.fs_rw = self.FS_RW_TEMPLATE.format(image_stem) + self.fs_work = self.FS_WORK_TEMPLATE.format(image_stem) self.fs_mountpoint = self.FS_MOUNTPOINT_TEMPLATE.format(image_stem) - def __get_current_image(self): + self.overlay_mountpoint = self.OVERLAY_MOUNTPOINT_TEMPLATE.format(image_stem) + + def get_current_image(self): cmd = "sonic_installer list | grep 'Current: ' | cut -f2 -d' '" output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) return output.rstrip(NEWLINE) - def __get_next_image(self): + def get_next_image(self): cmd = "sonic_installer list | grep 'Next: ' | cut -f2 -d' '" output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) return output.rstrip(NEWLINE) + def is_next_boot_set(self): + return self.current_image != self.next_image + def mount_next_image_fs(self): - if os.path.ismount(self.fs_mountpoint): + if 
os.path.ismount(self.fs_mountpoint) or os.path.ismount(self.overlay_mountpoint): self.umount_next_image_fs() os.mkdir(self.fs_mountpoint) - cmd = "mount -t squashfs {} {}".format(self.fs_path, self.fs_mountpoint) + cmd = "mount -t squashfs {} {}".format( + self.fs_path, + self.fs_mountpoint + ) subprocess.check_call(cmd, shell=True) - return self.fs_mountpoint + os.mkdir(self.overlay_mountpoint) + cmd = "mount -n -r -t overlay -o lowerdir={},upperdir={},workdir={} overlay {}".format( + self.fs_mountpoint, + self.fs_rw, + self.fs_work, + self.overlay_mountpoint + ) + subprocess.check_call(cmd, shell=True) + + return self.overlay_mountpoint def umount_next_image_fs(self): + if os.path.ismount(self.overlay_mountpoint): + cmd = "umount -rf {}".format(self.overlay_mountpoint) + subprocess.check_call(cmd, shell=True) + + if os.path.exists(self.overlay_mountpoint): + os.rmdir(self.overlay_mountpoint) + if os.path.ismount(self.fs_mountpoint): cmd = "umount -rf {}".format(self.fs_mountpoint) subprocess.check_call(cmd, shell=True) @@ -245,6 +269,9 @@ def umount_next_image_fs(self): if os.path.exists(self.fs_mountpoint): os.rmdir(self.fs_mountpoint) + current_image = property(fget=get_current_image) + next_image = property(fget=get_next_image) + class PlatformComponentsParser(object): """ diff --git a/fwutil/log.py b/fwutil/log.py index 0580e4bb27..a686c437ef 100755 --- a/fwutil/log.py +++ b/fwutil/log.py @@ -124,3 +124,6 @@ def log_fw_install_end(self, component, firmware, status, exception=None): def print_error(self, msg): click.echo("Error: {}.".format(msg)) + + def print_warning(self, msg): + click.echo("Warning: {}.".format(msg)) diff --git a/fwutil/main.py b/fwutil/main.py index c1443627c1..7f03e54ab0 100755 --- a/fwutil/main.py +++ b/fwutil/main.py @@ -227,14 +227,16 @@ def update(ctx, yes, force, image): squashfs = None try: - cup = None + cup = ComponentUpdateProvider() if image == IMAGE_NEXT: squashfs = SquashFs() - fs_path = squashfs.mount_next_image_fs() - cup = 
ComponentUpdateProvider(fs_path) - else: - cup = ComponentUpdateProvider() + + if squashfs.is_next_boot_set(): + fs_path = squashfs.mount_next_image_fs() + cup = ComponentUpdateProvider(fs_path) + else: + log_helper.print_warning("Next boot is set to current: fallback to defaults") click.echo(cup.get_status(force)) From 7602508ab4a06e580ac75737329baac9c1fd195e Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Tue, 24 Mar 2020 14:32:09 +0200 Subject: [PATCH 027/111] Fix for adding L3 interface to Vlan group (#826) Signed-off-by: Shlomi Bitton --- config/main.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/config/main.py b/config/main.py index bb40a1901b..3a30756dd2 100755 --- a/config/main.py +++ b/config/main.py @@ -1065,6 +1065,7 @@ def add_vlan_member(ctx, vid, interface_name, untagged): db = ctx.obj['db'] vlan_name = 'Vlan{}'.format(vid) vlan = db.get_entry('VLAN', vlan_name) + interface_table = db.get_table('INTERFACE') if get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(interface_name) @@ -1084,6 +1085,10 @@ def add_vlan_member(ctx, vid, interface_name, untagged): else: ctx.fail("{} is already a member of {}".format(interface_name, vlan_name)) + for entry in interface_table: + if (interface_name == entry[0]): + ctx.fail("{} is a L3 interface!".format(interface_name)) + members.append(interface_name) vlan['members'] = members db.set_entry('VLAN', vlan_name, vlan) From 635bccb120ebdf597a1920adad220ee486665085 Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Tue, 24 Mar 2020 18:45:20 +0200 Subject: [PATCH 028/111] [fwutil]: Fix component table layout. 
(#831) Signed-off-by: Nazarii Hnydyn --- fwutil/lib.py | 133 +++++++++++++++++++++----------------------------- 1 file changed, 55 insertions(+), 78 deletions(-) diff --git a/fwutil/lib.py b/fwutil/lib.py index b065c9eefa..156fe47516 100755 --- a/fwutil/lib.py +++ b/fwutil/lib.py @@ -552,6 +552,7 @@ def get_status(self, force): append_chassis_name = self.is_chassis_has_components() append_module_na = not self.is_modular_chassis() + module_name = NA for chassis_name, chassis_component_map in self.chassis_component_map.items(): for chassis_component_name, chassis_component in chassis_component_map.items(): @@ -565,17 +566,6 @@ def get_status(self, force): status = self.FW_STATUS_UP_TO_DATE info = NA - if append_chassis_name: - append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_na: - module_name = NA - append_module_na = False - else: - module_name = EMPTY - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] firmware_version_available = component[self.__pcp.VERSION_KEY] @@ -590,8 +580,8 @@ def get_status(self, force): status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_na else EMPTY, chassis_component_name, firmware_path, firmware_version, @@ -600,11 +590,19 @@ def get_status(self, force): ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_na: + append_module_na = False + append_chassis_name = not self.is_chassis_has_components() + chassis_name = self.chassis.get_name() if self.is_modular_chassis(): for module_name, module_component_map in self.module_component_map.items(): append_module_name = True + for module_component_name, module_component in module_component_map.items(): component = self.__pcp.module_component_map[module_name][module_component_name] @@ -616,17 +614,6 @@ def get_status(self, force): status = self.FW_STATUS_UP_TO_DATE info = NA - if append_chassis_name: - chassis_name = self.chassis.get_name() 
- append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_name: - append_module_name = False - else: - module_name = EMPTY - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] firmware_version_available = component[self.__pcp.VERSION_KEY] @@ -641,8 +628,8 @@ def get_status(self, force): status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_name else EMPTY, module_component_name, firmware_path, firmware_version, @@ -651,6 +638,12 @@ def get_status(self, force): ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_name: + append_module_name = False + return tabulate(status_table, self.STATUS_HEADER, tablefmt=self.FORMAT) def update_firmware(self, force): @@ -658,6 +651,7 @@ def update_firmware(self, force): append_chassis_name = self.is_chassis_has_components() append_module_na = not self.is_modular_chassis() + module_name = NA for chassis_name, chassis_component_map in self.chassis_component_map.items(): for chassis_component_name, chassis_component in chassis_component_map.items(): @@ -672,17 +666,6 @@ def update_firmware(self, force): status = self.FW_STATUS_UP_TO_DATE - if append_chassis_name: - append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_na: - module_name = NA - append_module_na = False - else: - module_name = EMPTY - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] firmware_version_available = component[self.__pcp.VERSION_KEY] @@ -712,14 +695,21 @@ def update_firmware(self, force): status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_na else EMPTY, chassis_component_name, status, ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_na: + append_module_na = False + append_chassis_name = not self.is_chassis_has_components() + chassis_name = 
self.chassis.get_name() if self.is_modular_chassis(): for module_name, module_component_map in self.module_component_map.items(): @@ -738,17 +728,6 @@ def update_firmware(self, force): status = self.FW_STATUS_UP_TO_DATE - if append_chassis_name: - chassis_name = self.chassis.get_name() - append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_name: - append_module_name = False - else: - module_name = EMPTY - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] firmware_version_available = component[self.__pcp.VERSION_KEY] @@ -778,13 +757,19 @@ def update_firmware(self, force): status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_name else EMPTY, module_component_name, status, ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_name: + append_module_name = False + return tabulate(status_table, self.RESULT_HEADER, tablefmt=self.FORMAT) @@ -803,34 +788,31 @@ def get_status(self): append_chassis_name = self.is_chassis_has_components() append_module_na = not self.is_modular_chassis() + module_name = NA for chassis_name, chassis_component_map in self.chassis_component_map.items(): for chassis_component_name, chassis_component in chassis_component_map.items(): firmware_version = chassis_component.get_firmware_version() description = chassis_component.get_description() - if append_chassis_name: - append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_na: - module_name = NA - append_module_na = False - else: - module_name = EMPTY - status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_na else EMPTY, chassis_component_name, firmware_version, description ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_na: + append_module_na = False + append_chassis_name = not self.is_chassis_has_components() + 
chassis_name = self.chassis.get_name() if self.is_modular_chassis(): for module_name, module_component_map in self.module_component_map.items(): @@ -840,25 +822,20 @@ def get_status(self): firmware_version = module_component.get_firmware_version() description = module_component.get_description() - if append_chassis_name: - chassis_name = self.chassis.get_name() - append_chassis_name = False - else: - chassis_name = EMPTY - - if append_module_name: - append_module_name = False - else: - module_name = EMPTY - status_table.append( [ - chassis_name, - module_name, + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_name else EMPTY, module_component_name, firmware_version, description ] ) + if append_chassis_name: + append_chassis_name = False + + if append_module_name: + append_module_name = False + return tabulate(status_table, self.HEADER, tablefmt=self.FORMAT) From 02a34ed17a810a8eaf78e9d543aed72a9f5e5150 Mon Sep 17 00:00:00 2001 From: bsun-sudo <56011247+bsun-sudo@users.noreply.github.com> Date: Wed, 25 Mar 2020 10:51:47 -0700 Subject: [PATCH 029/111] [ntp]: support "show ntp" with mgmt vrf based on linux os version (#858) Co-authored-by: Bing Sun --- show/main.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/show/main.py b/show/main.py index c5add8729e..f1c2b77f07 100755 --- a/show/main.py +++ b/show/main.py @@ -9,6 +9,7 @@ import subprocess import sys import ipaddress +from pkg_resources import parse_version import click from click_default_group import DefaultGroup @@ -2002,8 +2003,14 @@ def ntp(ctx, verbose): """Show NTP information""" ntpcmd = "ntpq -p -n" if is_mgmt_vrf_enabled(ctx) is True: - #ManagementVRF is enabled. Call ntpq using cgexec - ntpcmd = "cgexec -g l3mdev:mgmt ntpq -p -n" + #ManagementVRF is enabled. 
Call ntpq using "ip vrf exec" or cgexec based on linux version + os_info = os.uname() + release = os_info[2].split('-') + if parse_version(release[0]) > parse_version("4.9.0"): + ntpcmd = "ip vrf exec mgmt ntpq -p -n" + else: + ntpcmd = "cgexec -g l3mdev:mgmt ntpq -p -n" + run_command(ntpcmd, display_cmd=verbose) From 14269f825e662595561d63f11448c78c014f1ae4 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Thu, 26 Mar 2020 02:02:17 +0800 Subject: [PATCH 030/111] [Mellanox] Fix thermal control issue: use natural sort for fan status and thermal status (#836) * [thermal fix] use natural sort for fan status and thermal status * [thermal fix] set fan status to N/A when fan is removed * Adjust header name for show platform temperature output --- scripts/fanshow | 12 ++++++++---- scripts/tempershow | 6 +++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/scripts/fanshow b/scripts/fanshow index 75ad576cd4..81d0a9e2d8 100644 --- a/scripts/fanshow +++ b/scripts/fanshow @@ -8,6 +8,7 @@ import argparse from tabulate import tabulate from swsssdk import SonicV2Connector +from natsort import natsorted header = ['FAN', 'Speed', 'Direction', 'Presence', 'Status', 'Timestamp'] @@ -32,7 +33,7 @@ class FanShow(object): return table = [] - for key in keys: + for key in natsorted(keys): key_list = key.split('|') if len(key_list) != 2: # error data in DB, log it and ignore print('Warn: Invalid key in table FAN_INFO: {}'.format(key)) @@ -47,18 +48,21 @@ class FanShow(object): else: speed = '{}%'.format(data_dict[SPEED_FIELD_NAME]) except ValueError as e: - print('Warn: cannot convert speed value from {}'.format(data_dict[SPEED_FIELD_NAME])) speed = data_dict[SPEED_FIELD_NAME] presence = data_dict[PRESENCE_FIELD_NAME].lower() presence = 'Present' if presence == 'true' else 'Not Present' status = data_dict[STATUS_FIELD_NAME].lower() - status = 'OK' if status == 'true' else 'Not OK' + if status == 'true': + status = 
'OK' + elif status == 'false': + status = 'Not OK' + else: + status = 'N/A' table.append((name, speed, data_dict[DIRECTION_FIELD_NAME], presence, status, data_dict[TIMESTAMP_FIELD_NAME])) if table: - table.sort() print(tabulate(table, header, tablefmt='simple', stralign='right')) else: print('No fan status data available\n') diff --git a/scripts/tempershow b/scripts/tempershow index d8ba6a645b..aabc4943ed 100644 --- a/scripts/tempershow +++ b/scripts/tempershow @@ -8,9 +8,10 @@ import argparse from tabulate import tabulate from swsssdk import SonicV2Connector +from natsort import natsorted -header = ['NAME', 'Temperature', 'High Threshold', 'Low Threshold', 'Critical High Threshold', 'Critical Low Threshold', 'Warning Status', 'Timestamp'] +header = ['Sensor', 'Temperature', 'High TH', 'Low TH', 'Crit High TH', 'Crit Low TH', 'Warning', 'Timestamp'] TEMPER_TABLE_NAME = 'TEMPERATURE_INFO' TEMPER_FIELD_NAME = 'temperature' @@ -34,7 +35,7 @@ class TemperShow(object): return table = [] - for key in keys: + for key in natsorted(keys): key_list = key.split('|') if len(key_list) != 2: # error data in DB, log it and ignore print('Warn: Invalid key in table {}: {}'.format(TEMPER_TABLE_NAME, key)) @@ -53,7 +54,6 @@ class TemperShow(object): )) if table: - table.sort() print(tabulate(table, header, tablefmt='simple', stralign='right')) else: print('No tempeature data available\n') From c70e9be13a38c011016c5cf6d9afef4449ed2a5e Mon Sep 17 00:00:00 2001 From: SuvarnaMeenakshi <50386592+SuvarnaMeenakshi@users.noreply.github.com> Date: Wed, 25 Mar 2020 12:28:07 -0700 Subject: [PATCH 031/111] [multi-asic]: Update reload of systemd services to support multi-asic platforms (#856) * Update stop, reset failed status and restart of systemd services to support multi-asic platforms. * Create function to avoid code duplication. * Fixed errors due to pervious commit and review comments. * Minor update to fix spacing. * Minor update to fix spacing. * Minor update to fix spacing. 
* For multi asic platform updated logic of stopping/restarting of services to ensure that the right instances are stopped and restarted if a service is both global and multi-instance. * Fixed log error message with incorrect number of parameterts. --- config/main.py | 103 +++++++++++++++++++++++++++---------------------- 1 file changed, 57 insertions(+), 46 deletions(-) diff --git a/config/main.py b/config/main.py index 3a30756dd2..28eebf8c0b 100755 --- a/config/main.py +++ b/config/main.py @@ -27,9 +27,14 @@ SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' SYSLOG_IDENTIFIER = "config" VLAN_SUB_INTERFACE_SEPARATOR = '.' +ASIC_CONF_FILENAME = 'asic.conf' INIT_CFG_FILE = '/etc/sonic/init_cfg.json' +SYSTEMCTL_ACTION_STOP="stop" +SYSTEMCTL_ACTION_RESTART="restart" +SYSTEMCTL_ACTION_RESET_FAILED="reset-failed" + # ========================== Syslog wrappers ========================== def log_debug(msg): @@ -69,6 +74,31 @@ def log_error(msg): # Helper functions # +# Execute action on list of systemd services +def execute_systemctl(list_of_services, action): + num_asic = _get_num_asic() + generated_services_list, generated_multi_instance_services = _get_sonic_generated_services(num_asic) + if ((generated_services_list == []) and + (generated_multi_instance_services == [])): + log_error("Failed to get generated services") + return + + for service in list_of_services: + if (service + '.service' in generated_services_list): + try: + click.echo("Executing {} of service {}...".format(action, service)) + run_command("systemctl {} {}".format(action, service)) + except SystemExit as e: + log_error("Failed to execute {} of service {} with error {}".format(action, service, e)) + raise + if (service + '.service' in generated_multi_instance_services): + for inst in range(num_asic): + try: + click.echo("Executing {} of service {}@{}...".format(action, service, inst)) + run_command("systemctl {} {}@{}.service".format(action, service, inst)) + except SystemExit as e: + log_error("Failed 
to execute {} of service {}@{} with error {}".format(action, service, inst, e)) + raise def run_command(command, display_cmd=False, ignore_error=False): """Run bash command and print output to stdout @@ -395,14 +425,34 @@ def _get_platform(): return tokens[1].strip() return '' -def _get_sonic_generated_services(): +def _get_num_asic(): + platform = _get_platform() + num_asic = 1 + asic_conf_file = os.path.join('/usr/share/sonic/device/', platform, ASIC_CONF_FILENAME) + if os.path.isfile(asic_conf_file): + with open(asic_conf_file) as conf_file: + for line in conf_file: + line_info = line.split('=') + if line_info[0].lower() == "num_asic": + num_asic = int(line_info[1]) + return num_asic + +def _get_sonic_generated_services(num_asic): if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): return None generated_services_list = [] + generated_multi_instance_services = [] with open(SONIC_GENERATED_SERVICE_PATH) as generated_service_file: for line in generated_service_file: - generated_services_list.append(line.rstrip('\n')) - return None if not generated_services_list else generated_services_list + if '@' in line: + line = line.replace('@', '') + if num_asic > 1: + generated_multi_instance_services.append(line.rstrip('\n')) + else: + generated_services_list.append(line.rstrip('\n')) + else: + generated_services_list.append(line.rstrip('\n')) + return generated_services_list, generated_multi_instance_services # Callback for confirmation prompt. 
Aborts if user enters "n" def _abort_if_false(ctx, param, value): @@ -419,25 +469,11 @@ def _stop_services(): 'hostcfgd', 'nat' ] - generated_services_list = _get_sonic_generated_services() - - if generated_services_list is None: - log_error("Failed to get generated services") - return if asic_type == 'mellanox' and 'pmon' in services_to_stop: services_to_stop.remove('pmon') - for service in services_to_stop: - if service + '.service' not in generated_services_list: - continue - try: - click.echo("Stopping service {} ...".format(service)) - run_command("systemctl stop {}".format(service)) - - except SystemExit as e: - log_error("Stopping {} failed with error {}".format(service, e)) - raise + execute_systemctl(services_to_stop, SYSTEMCTL_ACTION_STOP) def _reset_failed_services(): services_to_reset = [ @@ -458,22 +494,9 @@ def _reset_failed_services(): 'nat', 'sflow' ] + execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED) - generated_services_list = _get_sonic_generated_services() - if generated_services_list is None: - log_error("Failed to get generated services") - return - - for service in services_to_reset: - if service + '.service' not in generated_services_list: - continue - try: - click.echo("Resetting failed status for service {} ...".format(service)) - run_command("systemctl reset-failed {}".format(service)) - except SystemExit as e: - log_error("Failed to reset failed status for service {}".format(service)) - raise def _restart_services(): # on Mellanox platform pmon is started by syncd @@ -490,24 +513,12 @@ def _restart_services(): 'nat', 'sflow', ] - generated_services_list = _get_sonic_generated_services() - - if generated_services_list is None: - log_error("Failed to get generated services") - return if asic_type == 'mellanox' and 'pmon' in services_to_restart: services_to_restart.remove('pmon') - for service in services_to_restart: - if service + '.service' not in generated_services_list: - continue - try: - click.echo("Restarting 
service {} ...".format(service)) - run_command("systemctl restart {}".format(service)) - except SystemExit as e: - log_error("Restart {} failed with error {}".format(service, e)) - raise + execute_systemctl(services_to_restart, SYSTEMCTL_ACTION_RESTART) + def is_ipaddress(val): """ Validate if an entry is a valid IP """ From 9a0be971af40c775e3498cd703b57b41b580a111 Mon Sep 17 00:00:00 2001 From: Wei Bai Date: Fri, 3 Apr 2020 03:57:54 -0700 Subject: [PATCH 032/111] [pfc] Add command line to enable/disable/show PFC (#796) Current PFC cli only allows operators to enable/disable per-port asymmetric PFC. I extend add new commands to 1) enable/disable PFC on a given priority of a given port and 2) check PFC configuration. I also link all these commands to show and config utilities. Signed-off-by: Wei Bai baiwei0427@gmail.com Co-authored-by: lguohan --- config/main.py | 22 ++++++- doc/Command-Reference.md | 87 ++++++++++++++++++++++++++++ pfc/main.py | 120 ++++++++++++++++++++++++++++++++------- show/main.py | 26 +++++++++ 4 files changed, 231 insertions(+), 24 deletions(-) diff --git a/config/main.py b/config/main.py index 28eebf8c0b..664feef82a 100755 --- a/config/main.py +++ b/config/main.py @@ -2158,7 +2158,7 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, verbose): # -# 'pfc' group ('config pfc ...') +# 'pfc' group ('config interface pfc ...') # @interface.group() @@ -2169,7 +2169,7 @@ def pfc(ctx): # -# 'pfc asymmetric' command +# 'pfc asymmetric' ('config interface pfc asymmetric ...') # @pfc.command() @@ -2185,6 +2185,24 @@ def asymmetric(ctx, interface_name, status): run_command("pfc config asymmetric {0} {1}".format(status, interface_name)) +# +# 'pfc priority' command ('config interface pfc priority ...') +# + +@pfc.command() +@click.argument('interface_name', metavar='', required=True) +@click.argument('priority', type=click.Choice([str(x) for x in range(8)])) +@click.argument('status', type=click.Choice(['on', 'off'])) +@click.pass_context +def 
priority(ctx, interface_name, priority, status): + """Set PFC priority configuration.""" + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + run_command("pfc config priority {0} {1} {2}".format(status, interface_name, priority)) + # # 'platform' group ('config platform ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 5c41983cb0..f0e2b4e3e4 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2520,6 +2520,34 @@ VLAN interface names take the form of `vlan`. E.g., VLAN 100 will be na admin@sonic:~$ sudo config interface vlan100 ip remove 10.11.12.13/24 ``` +**config interface pfc priority (on | off)** + +This command is used to set PFC on a given priority of a given interface to either "on" or "off". Once it is successfully configured, it will show current losses priorities on the given interface. Otherwise, it will show error information + +- Example: + *Versions >= 201904* + ``` + admin@sonic:~$ sudo config interface pfc priority Ethernet0 3 off + + Interface Lossless priorities + ----------- --------------------- + Ethernet0 4 + + admin@sonic:~$ sudo config interface pfc priority Ethernet0 8 off + Usage: pfc config priority [OPTIONS] STATUS INTERFACE PRIORITY + + Error: Invalid value for "priority": invalid choice: 8. 
(choose from 0, 1, 2, 3, 4, 5, 6, 7) + + admin@sonic:~$ sudo config interface pfc priority Ethernet101 3 off + Cannot find interface Ethernet101 + + admin@sonic:~$ sudo config interface pfc priority Ethernet0 3 on + + Interface Lossless priorities + ----------- --------------------- + Ethernet0 3,4 + ``` + **config interface pfc asymmetric (Versions >= 201904)** **config interface pfc asymmetric (Versions <= 201811)** @@ -4307,6 +4335,65 @@ This command displays the details of Rx & Tx priority-flow-control (pfc) for all admin@sonic:~$ sonic-clear pfccounters ``` +**show pfc asymmetric** + +This command displays the status of asymmetric PFC for all interfaces or a given interface. + +- Usage: + ``` + show pfc asymmetric [] + ``` + +- Example: + ``` + admin@sonic:~$ show pfc asymmetric + + Interface Asymmetric + ----------- ------------ + Ethernet0 off + Ethernet2 off + Ethernet4 off + Ethernet6 off + Ethernet8 off + Ethernet10 off + Ethernet12 off + Ethernet14 off + + admin@sonic:~$ show pfc asymmetric Ethernet0 + + Interface Asymmetric + ----------- ------------ + Ethernet0 off + ``` + +**show pfc priority** + +This command displays the lossless priorities for all interfaces or a given interface. + +- Usage: + ``` + show pfc priority [] + ``` + +- Example: + ``` + admin@sonic:~$ show pfc priority + + Interface Lossless priorities + ----------- --------------------- + Ethernet0 3,4 + Ethernet2 3,4 + Ethernet8 3,4 + Ethernet10 3,4 + Ethernet16 3,4 + + admin@sonic:~$ show pfc priority Ethernet0 + + Interface Lossless priorities + ----------- --------------------- + Ethernet0 3,4 + ``` + #### Queue And Priority-Group This sub-section explains the following queue parameters that can be displayed using "show queue" command. 
diff --git a/pfc/main.py b/pfc/main.py index c2f6e0d31f..52ac4ed122 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -6,12 +6,13 @@ from tabulate import tabulate from natsort import natsorted +ALL_PRIORITIES = [str(x) for x in range(8)] +PRIORITY_STATUS = ['on', 'off'] def configPfcAsym(interface, pfc_asym): """ PFC handler to configure asymmentric PFC. """ - configdb = swsssdk.ConfigDBConnector() configdb.connect() @@ -22,11 +23,6 @@ def showPfcAsym(interface): """ PFC handler to display asymmetric PFC information. """ - - i = {} - table = [] - key = [] - header = ('Interface', 'Asymmetric') configdb = swsssdk.ConfigDBConnector() @@ -37,7 +33,10 @@ def showPfcAsym(interface): else: db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') + table = [] + for i in db_keys or [None]: + key = None if i: key = i.split('|')[-1] @@ -51,37 +50,114 @@ def showPfcAsym(interface): print tabulate(sorted_table, headers=header, tablefmt="simple", missingval="") print '\n' +def configPfcPrio(status, interface, priority): + configdb = swsssdk.ConfigDBConnector() + configdb.connect() -@click.group() -def cli(): + if interface not in configdb.get_keys('PORT_QOS_MAP'): + print 'Cannot find interface {0}'.format(interface) + return + + """Current lossless priorities on the interface""" + entry = configdb.get_entry('PORT_QOS_MAP', interface) + enable_prio = entry.get('pfc_enable').split(',') + + """Avoid '' in enable_prio""" + enable_prio = [x.strip() for x in enable_prio if x.strip()] + + if status == 'on' and priority in enable_prio: + print 'Priority {0} has already been enabled on {1}'.format(priority, interface) + return + + if status == 'off' and priority not in enable_prio: + print 'Priority {0} is not enabled on {1}'.format(priority, interface) + return + + if status == 'on': + enable_prio.append(priority) + + else: + enable_prio.remove(priority) + + enable_prio.sort() + configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) + + """Show the latest PFC 
configuration""" + showPfcPrio(interface) + +def showPfcPrio(interface): """ - Utility entry point. + PFC handler to display PFC enabled priority information. """ + header = ('Interface', 'Lossless priorities') + table = [] + + configdb = swsssdk.ConfigDBConnector() + configdb.connect() + + """Get all the interfaces with QoS map information""" + intfs = configdb.get_keys('PORT_QOS_MAP') + + """The user specifies an interface but we cannot find it""" + if interface and interface not in intfs: + print 'Cannot find interface {0}'.format(interface) + return + + if interface: + intfs = [interface] + + for intf in intfs: + entry = configdb.get_entry('PORT_QOS_MAP', intf) + table.append([intf, entry.get('pfc_enable', 'N/A')]) + + sorted_table = natsorted(table) + print '\n' + print tabulate(sorted_table, headers=header, tablefmt="simple", missingval="") + print '\n' + +@click.group() +def cli(): + """PFC Command Line""" pass - @cli.group() def config(): - """Config PFC information""" + """Config PFC""" pass - -@config.command() -@click.argument('status', type=click.Choice(['on', 'off'])) -@click.argument('interface', type=click.STRING) -def asymmetric(status, interface): - """Set asymmetric PFC configuration.""" - configPfcAsym(interface, status) - - @cli.group() def show(): """Show PFC information""" pass +@click.command() +@click.argument('status', type=click.Choice(PRIORITY_STATUS)) +@click.argument('interface', type=click.STRING) +def configAsym(status, interface): + """Configure asymmetric PFC on a given port.""" + configPfcAsym(interface, status) -@show.command() +@click.command() +@click.argument('status', type=click.Choice(PRIORITY_STATUS)) +@click.argument('interface', type=click.STRING) +@click.argument('priority', type=click.Choice(ALL_PRIORITIES)) +def configPrio(status, interface, priority): + """Configure PFC on a given priority.""" + configPfcPrio(status, interface, priority) + +@click.command() @click.argument('interface', type=click.STRING, 
required=False) -def asymmetric(interface): +def showAsym(interface): """Show asymmetric PFC information""" showPfcAsym(interface) + +@click.command() +@click.argument('interface', type=click.STRING, required=False) +def showPrio(interface): + """Show PFC priority information""" + showPfcPrio(interface) + +config.add_command(configAsym, "asymmetric") +config.add_command(configPrio, "priority") +show.add_command(showAsym, "asymmetric") +show.add_command(showPrio, "priority") \ No newline at end of file diff --git a/show/main.py b/show/main.py index f1c2b77f07..f5a5a1340d 100755 --- a/show/main.py +++ b/show/main.py @@ -1044,6 +1044,32 @@ def counters(verbose): run_command(cmd, display_cmd=verbose) +@pfc.command() +@click.argument('interface', type=click.STRING, required=False) +def priority(interface): + """Show pfc priority""" + cmd = 'pfc show priority' + if interface is not None and get_interface_mode() == "alias": + interface = iface_alias_converter.alias_to_name(interface) + + if interface is not None: + cmd += ' {0}'.format(interface) + + run_command(cmd) + +@pfc.command() +@click.argument('interface', type=click.STRING, required=False) +def asymmetric(interface): + """Show asymmetric pfc""" + cmd = 'pfc show asymmetric' + if interface is not None and get_interface_mode() == "alias": + interface = iface_alias_converter.alias_to_name(interface) + + if interface is not None: + cmd += ' {0}'.format(interface) + + run_command(cmd) + # 'pfcwd' subcommand ("show pfcwd...") @cli.group(cls=AliasedGroup, default_if_no_args=False) def pfcwd(): From a531039592474412487199f1842142f5514531f8 Mon Sep 17 00:00:00 2001 From: Neetha John Date: Fri, 3 Apr 2020 03:58:47 -0700 Subject: [PATCH 033/111] [watermarkstat] Fix issue of fields overwritten before display (#862) Fix the bug where the unicast/multicast queue watermark display was incorrect - How I did it Calculate the header list dynamically for PG and queue display Display the correct queue numbers in the output instead 
of always starting the index from 0 Calculate the display position relative to the starting index Signed-off-by: Neetha John --- scripts/watermarkstat | 48 ++++++++++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 14 deletions(-) diff --git a/scripts/watermarkstat b/scripts/watermarkstat index b2a6c5f0ad..9b446248da 100644 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -15,9 +15,6 @@ from natsort import natsorted from tabulate import tabulate -headerPg = ['Port', 'PG0', 'PG1', 'PG2', 'PG3', 'PG4', 'PG5', 'PG6', 'PG7'] -headerUc = ['Port', 'UC0', 'UC1', 'UC2', 'UC3', 'UC4', 'UC5', 'UC6', 'UC7'] -headerMc = ['Port', 'MC8', 'MC9', 'MC10', 'MC11', 'MC12', 'MC13', 'MC14', 'MC15'] headerBufferPool = ['Pool', 'Bytes'] @@ -140,22 +137,22 @@ class Watermarkstat(object): "obj_map" : self.port_pg_map, "idx_func": self.get_pg_index, "wm_name" : "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES", - "header" : headerPg}, + "header_prefix": "PG"}, "pg_shared" : {"message" : "Ingress shared pool occupancy per PG:", "obj_map" : self.port_pg_map, "idx_func": self.get_pg_index, "wm_name" : "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES", - "header" : headerPg}, + "header_prefix": "PG"}, "q_shared_uni" : {"message" : "Egress shared pool occupancy per unicast queue:", "obj_map" : self.port_uc_queues_map, "idx_func": self.get_queue_index, "wm_name" : "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", - "header" : headerUc}, + "header_prefix": "UC"}, "q_shared_multi": {"message" : "Egress shared pool occupancy per multicast queue:", "obj_map" : self.port_mc_queues_map, "idx_func": self.get_queue_index, "wm_name" : "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES", - "header" : headerMc}, + "header_prefix": "MC"}, "buffer_pool" : {"message": "Shared pool maximum occupancy:", "wm_name": "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES", "header" : headerBufferPool} @@ -177,28 +174,48 @@ class Watermarkstat(object): return pg_index + def build_header(self, wm_type): + if 
wm_type is None: + print >> sys.stderr, "Header info is not available!" + sys.exit(1) + + self.header_list = ['Port'] + header_map = wm_type["obj_map"] + single_key = header_map.keys()[0] + header_len = len(header_map[single_key]) + min_idx = float("inf") + + for name, counter_oid in header_map[single_key].items(): + curr_idx = int(wm_type["idx_func"](counter_oid)) + min_idx = min(min_idx, curr_idx) + + self.min_idx = int(min_idx) + self.header_list += ["{}{}".format(wm_type["header_prefix"], idx) for idx in range(self.min_idx, self.min_idx + header_len)] + def get_counters(self, table_prefix, port_obj, idx_func, watermark): """ Get the counters from specific table. """ - fields = ["0"] * 8 + # header list contains the port name followed by the queues/pgs. fields is used to populate the queue/pg values + fields = ["0"]* (len(self.header_list) - 1) for name, obj_id in port_obj.items(): full_table_id = table_prefix + obj_id - pos = int(idx_func(obj_id)) % len(fields) + idx = int(idx_func(obj_id)) + pos = idx - self.min_idx counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, watermark) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = tuple(fields) - return cntr + return fields def print_all_stat(self, table_prefix, key): table = [] type = self.watermark_types[key] if key == 'buffer_pool': + self.header_list = type['header'] # Get stats for each buffer pool for buf_pool, bp_oid in natsorted(self.buffer_pool_name_to_oid_map.items()): key = table_prefix + bp_oid @@ -207,15 +224,18 @@ class Watermarkstat(object): data = STATUS_NA table.append((buf_pool, data)) else: + self.build_header(type) # Get stat for each port for port in natsorted(self.counter_port_name_map): + row_data = list() data = self.get_counters(table_prefix, type["obj_map"][port], type["idx_func"], type["wm_name"]) - table.append((port, data[0], data[1], data[2], data[3], - data[4], data[5], data[6], 
data[7])) + row_data.append(port) + row_data.extend(data) + table.append(tuple(row_data)) print(type["message"]) - print tabulate(table, type["header"], tablefmt='simple', stralign='right') + print tabulate(table, self.header_list, tablefmt='simple', stralign='right') def send_clear_notification(self, data): msg = json.dumps(data, separators=(',', ':')) From e2f62ebd923b9afec42562f8d246347fc0aaa2f8 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Fri, 3 Apr 2020 03:59:35 -0700 Subject: [PATCH 034/111] Explicitly specify command names with underscores (continued) (#852) --- config/main.py | 16 ++++++++-------- debug/main.py | 6 +++--- pddf_fanutil/main.py | 2 +- pddf_psuutil/main.py | 2 +- pddf_thermalutil/main.py | 2 +- pfcwd/main.py | 4 ++-- show/main.py | 8 ++++---- sonic_installer/main.py | 10 +++++----- undebug/main.py | 6 +++--- 9 files changed, 28 insertions(+), 28 deletions(-) diff --git a/config/main.py b/config/main.py index 664feef82a..62acb28882 100755 --- a/config/main.py +++ b/config/main.py @@ -770,7 +770,7 @@ def del_portchannel_member(ctx, portchannel_name, port_name): # # 'mirror_session' group ('config mirror_session ...') # -@config.group() +@config.group('mirror_session') def mirror_session(): pass @@ -875,7 +875,7 @@ def interval(poll_interval, verbose): run_command(cmd, display_cmd=verbose) -@pfcwd.command() +@pfcwd.command('counter_poll') @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('counter_poll', type=click.Choice(['enable', 'disable'])) def counter_poll(counter_poll, verbose): @@ -885,7 +885,7 @@ def counter_poll(counter_poll, verbose): run_command(cmd, display_cmd=verbose) -@pfcwd.command() +@pfcwd.command('big_red_switch') @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.argument('big_red_switch', type=click.Choice(['enable', 'disable'])) def big_red_switch(big_red_switch, verbose): @@ -895,7 +895,7 @@ def big_red_switch(big_red_switch, verbose): run_command(cmd, 
display_cmd=verbose) -@pfcwd.command() +@pfcwd.command('start_default') @click.option('--verbose', is_flag=True, help="Enable verbose output") def start_default(verbose): """ Start PFC WD by default configurations """ @@ -945,7 +945,7 @@ def reload(): # # 'warm_restart' group ('config warm_restart ...') # -@config.group() +@config.group('warm_restart') @click.pass_context @click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') def warm_restart(ctx, redis_unix_socket_path): @@ -1392,7 +1392,7 @@ def memory(kdump_memory): config_db.mod_entry("KDUMP", "config", {"memory": kdump_memory}) run_command("sonic-kdump-config --memory %s" % kdump_memory) -@kdump.command() +@kdump.command('num-dumps') @click.argument('kdump_num_dumps', metavar='', required=True, type=int) def num_dumps(kdump_num_dumps): """Set max number of dump files for kdump""" @@ -2109,7 +2109,7 @@ def delete(counter_name, verbose): # # 'add_reasons' subcommand ('config dropcounters add_reasons') # -@dropcounters.command() +@dropcounters.command('add-reasons') @click.argument("counter_name", type=str, required=True) @click.argument("reasons", type=str, required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") @@ -2122,7 +2122,7 @@ def add_reasons(counter_name, reasons, verbose): # # 'remove_reasons' subcommand ('config dropcounters remove_reasons') # -@dropcounters.command() +@dropcounters.command('remove-reasons') @click.argument("counter_name", type=str, required=True) @click.argument("reasons", type=str, required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") diff --git a/debug/main.py b/debug/main.py index eb099556c2..956c72404c 100755 --- a/debug/main.py +++ b/debug/main.py @@ -38,7 +38,7 @@ def bgp(): """debug bgp group """ pass - @bgp.command() + @bgp.command('allow-martians') def allow_martians(): """BGP allow martian next hops""" command = 'sudo vtysh -c "debug bgp allow-martians"' @@ -71,7 
+71,7 @@ def keepalives(prefix_or_iface): command += '"' run_command(command) - @bgp.command() + @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor Events""" @@ -97,7 +97,7 @@ def pbr(additional): command += '"' run_command(command) - @bgp.command() + @bgp.command('update-groups') def update_groups(): """BGP update-groups""" command = 'sudo vtysh -c "debug bgp update-groups"' diff --git a/pddf_fanutil/main.py b/pddf_fanutil/main.py index dee001603b..68db948e63 100644 --- a/pddf_fanutil/main.py +++ b/pddf_fanutil/main.py @@ -178,7 +178,7 @@ def debug(): """pddf_fanutil debug commands""" pass -@debug.command() +@debug.command('dump-sysfs') def dump_sysfs(): """Dump all Fan related SysFS paths""" status = platform_fanutil.dump_sysfs() diff --git a/pddf_psuutil/main.py b/pddf_psuutil/main.py index 1a00a3b774..a034f34a08 100644 --- a/pddf_psuutil/main.py +++ b/pddf_psuutil/main.py @@ -180,7 +180,7 @@ def debug(): """pddf_psuutil debug commands""" pass -@debug.command() +@debug.command('dump-sysfs') def dump_sysfs(): """Dump all PSU related SysFS paths""" status = platform_psuutil.dump_sysfs() diff --git a/pddf_thermalutil/main.py b/pddf_thermalutil/main.py index c72e9d45dd..be91273a7b 100644 --- a/pddf_thermalutil/main.py +++ b/pddf_thermalutil/main.py @@ -105,7 +105,7 @@ def debug(): """pddf_thermalutil debug commands""" pass -@debug.command() +@debug.command('dump-sysfs') def dump_sysfs(): """Dump all Temp Sensor related SysFS paths""" status = platform_thermalutil.dump_sysfs() diff --git a/pfcwd/main.py b/pfcwd/main.py index 3a27e993a8..eb4daa15ec 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -254,7 +254,7 @@ def start_default(): configdb.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL", pfcwd_info) # Enable/disable PFC WD counter polling -@cli.command() +@cli.command('counter_poll') @click.argument('counter_poll', type=click.Choice(['enable', 'disable'])) def 
counter_poll(counter_poll): """ Enable/disable counter polling """ @@ -267,7 +267,7 @@ def counter_poll(counter_poll): configdb.mod_entry("FLEX_COUNTER_TABLE", "PFCWD", pfcwd_info) # Enable/disable PFC WD BIG_RED_SWITCH mode -@cli.command() +@cli.command('big_red_switch') @click.argument('big_red_switch', type=click.Choice(['enable', 'disable'])) def big_red_switch(big_red_switch): """ Enable/disable BIG_RED_SWITCH mode """ diff --git a/show/main.py b/show/main.py index f5a5a1340d..566874d4ca 100755 --- a/show/main.py +++ b/show/main.py @@ -687,7 +687,7 @@ def mgmt_vrf(ctx,routes): # 'management_interface' group ("show management_interface ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='management_interface', cls=AliasedGroup, default_if_no_args=False) def management_interface(): """Show management interface parameters""" pass @@ -1095,7 +1095,7 @@ def stats(verbose): run_command(cmd, display_cmd=verbose) # 'naming_mode' subcommand ("show interfaces naming_mode") -@interfaces.command() +@interfaces.command('naming_mode') @click.option('--verbose', is_flag=True, help="Enable verbose output") def naming_mode(verbose): """Show interface naming_mode status""" @@ -2354,7 +2354,7 @@ def tacacs(): # # 'mirror_session' command ("show mirror_session ...") # -@cli.command() +@cli.command('mirror_session') @click.argument('session_name', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") def mirror_session(session_name, verbose): @@ -2626,7 +2626,7 @@ def line(): return -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='warm_restart', cls=AliasedGroup, default_if_no_args=False) def warm_restart(): """Show warm restart configuration and state""" pass diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 393055b3f6..32be097dd0 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -449,7 +449,7 @@ def list(): click.echo(image) # Set default image for boot 
-@cli.command() +@cli.command('set_default') @click.argument('image') def set_default(image): """ Choose image to boot from by default """ @@ -459,7 +459,7 @@ def set_default(image): # Set image for next boot -@cli.command() +@cli.command('set_next_boot') @click.argument('image') def set_next_boot(image): """ Choose image for next reboot (one time action) """ @@ -499,7 +499,7 @@ def remove(image): remove_image(image) # Retrieve version from binary image file and print to screen -@cli.command() +@cli.command('binary_version') @click.argument('binary_image_path') def binary_version(binary_image_path): """ Get version from local binary image file """ @@ -530,7 +530,7 @@ def cleanup(): click.echo("No image(s) to remove") # Upgrade docker image -@cli.command() +@cli.command('upgrade_docker') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='New docker image will be installed, continue?') @click.option('--cleanup_image', is_flag=True, help="Clean up old docker image") @@ -694,7 +694,7 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): sys.exit(1) # rollback docker image -@cli.command() +@cli.command('rollback_docker') @click.option('-y', '--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Docker image will be rolled back, continue?') @click.argument('container_name', metavar='', required=True, diff --git a/undebug/main.py b/undebug/main.py index 96bdd3c1f8..55f62ebb3f 100644 --- a/undebug/main.py +++ b/undebug/main.py @@ -38,7 +38,7 @@ def bgp(): """debug bgp group """ pass - @bgp.command() + @bgp.command('allow-martians') def allow_martians(): """BGP allow martian next hops""" command = 'sudo vtysh -c "no debug bgp allow-martians"' @@ -71,7 +71,7 @@ def keepalives(prefix_or_iface): command += '"' run_command(command) - @bgp.command() + @bgp.command('neighbor-events') @click.argument('prefix_or_iface', required=False) def neighbor_events(prefix_or_iface): """BGP Neighbor 
Events""" @@ -97,7 +97,7 @@ def pbr(additional): command += '"' run_command(command) - @bgp.command() + @bgp.command('update-groups') def update_groups(): """BGP update-groups""" command = 'sudo vtysh -c "no debug bgp update-groups"' From c98e7e355893d5846f9ab8cc805f005a17de9f11 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Tue, 7 Apr 2020 19:33:02 +0300 Subject: [PATCH 035/111] Fixes bug for PFCWD feature parameters (#838) What I did The feature allows setting 'detection_time', 'restoration_time' and 'pollling_interval' PFCWD to an interface. The 'pollling_interval' must be lower than 'detection_time' and 'restoration_time'. The fix is checking if there is a lower value of 'detection_time' or 'restoration_time' than the 'pollling_interval' value entered by the user in config DB, if yes exit with error code 1. How I did it Checking the config DB for interfaces PFCWD values. How to verify it Try adding 'pollling_interval' greater than one of the values of PFCWD interfaces. Previous command output (if the output of a command-line utility has changed) No Output. 
New command output (if the output of a command-line utility has changed) unable to use polling_interval = #ms, value is bigger or equal to the minimum in PFCWD table --- pfcwd/main.py | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/pfcwd/main.py b/pfcwd/main.py index eb4daa15ec..fbb944009c 100644 --- a/pfcwd/main.py +++ b/pfcwd/main.py @@ -3,6 +3,7 @@ import click import swsssdk import os +import sys from tabulate import tabulate from natsort import natsorted @@ -194,9 +195,25 @@ def interval(poll_interval): configdb.connect() pfcwd_info = {} if poll_interval is not None: + pfcwd_table = configdb.get_table(CONFIG_DB_PFC_WD_TABLE_NAME) + entry_min = 3000 + for entry in pfcwd_table: + if("Ethernet" not in entry): + continue + detection_time_entry_value = int(configdb.get_entry(CONFIG_DB_PFC_WD_TABLE_NAME, entry).get('detection_time')) + restoration_time_entry_value = int(configdb.get_entry(CONFIG_DB_PFC_WD_TABLE_NAME, entry).get('restoration_time')) + if ((detection_time_entry_value != None) and (detection_time_entry_value < entry_min)): + entry_min = detection_time_entry_value + entry_min_str = "detection time" + if ((restoration_time_entry_value != None) and (restoration_time_entry_value < entry_min)): + entry_min = restoration_time_entry_value + entry_min_str = "restoration time" + if entry_min < poll_interval: + print >> sys.stderr, "unable to use polling interval = {}ms, value is bigger than one of the configured {} values, please choose a smaller polling_interval".format(poll_interval,entry_min_str) + exit(1) + pfcwd_info['POLL_INTERVAL'] = poll_interval - - configdb.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL", pfcwd_info) + configdb.mod_entry(CONFIG_DB_PFC_WD_TABLE_NAME, "GLOBAL", pfcwd_info) # Stop WD @cli.command() From 809813ebaedf48181547729e9d4996cca65500ba Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Tue, 7 Apr 2020 17:36:46 -0700 Subject: [PATCH 036/111] Fix Python warnings (#867) - Fix Python warnings 
flagged by "LGTM" plugin. - Also make all 'except' statements Python3-compliant --- clear/main.py | 2 +- config/aaa.py | 2 +- config/main.py | 12 +----------- config/mlnx.py | 2 +- psuutil/main.py | 6 +++--- scripts/boot_part | 9 +++++---- scripts/decode-syseeprom | 12 +++++++----- scripts/fdbshow | 2 +- scripts/natconfig | 10 ---------- scripts/nbrshow | 2 +- scripts/neighbor_advertiser | 1 - scripts/pcmping | 2 +- scripts/portstat | 2 +- scripts/sonic-kdump-config | 7 +++++-- sfputil/main.py | 8 ++++---- show/bgp_frr_v6.py | 6 +++--- show/bgp_quagga_v4.py | 6 +++--- show/bgp_quagga_v6.py | 6 +++--- show/main.py | 12 +++++------- sonic_installer/main.py | 4 ++-- ssdutil/main.py | 2 +- utilities_common/util_base.py | 8 ++++---- 22 files changed, 53 insertions(+), 70 deletions(-) diff --git a/clear/main.py b/clear/main.py index 9798b7b5a5..669c87567a 100755 --- a/clear/main.py +++ b/clear/main.py @@ -95,7 +95,7 @@ def get_routing_stack(): proc.wait() result = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Cannot detect routing-stack") return (result) diff --git a/config/aaa.py b/config/aaa.py index 32e4feaf21..251ada2652 100644 --- a/config/aaa.py +++ b/config/aaa.py @@ -11,7 +11,7 @@ def is_ipaddress(val): return False try: netaddr.IPAddress(str(val)) - except: + except ValueError: return False return True diff --git a/config/main.py b/config/main.py index 62acb28882..e8f89a19dd 100755 --- a/config/main.py +++ b/config/main.py @@ -526,7 +526,7 @@ def is_ipaddress(val): return False try: netaddr.IPAddress(str(val)) - except: + except ValueError: return False return True @@ -715,7 +715,6 @@ def portchannel(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass @portchannel.command('add') @click.argument('portchannel_name', metavar='', required=True) @@ -962,7 +961,6 @@ def warm_restart(ctx, redis_unix_socket_path): TABLE_NAME_SEPARATOR = '|' prefix = 'WARM_RESTART_ENABLE_TABLE' + 
TABLE_NAME_SEPARATOR ctx.obj = {'db': config_db, 'state_db': state_db, 'prefix': prefix} - pass @warm_restart.command('enable') @click.argument('module', metavar='', default='system', required=False, type=click.Choice(["system", "swss", "bgp", "teamd"])) @@ -1032,7 +1030,6 @@ def vlan(ctx, redis_unix_socket_path): config_db = ConfigDBConnector(**kwargs) config_db.connect(wait_for_init=False) ctx.obj = {'db': config_db} - pass @vlan.command('add') @click.argument('vid', metavar='', required=True, type=int) @@ -1183,7 +1180,6 @@ def snmpagentaddress(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass @snmpagentaddress.command('add') @click.argument('agentip', metavar='', required=True) @@ -1233,7 +1229,6 @@ def snmptrap(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass @snmptrap.command('modify') @click.argument('ver', metavar='', type=click.Choice(['1', '2', '3']), required=True) @@ -1362,7 +1357,6 @@ def kdump(): """ Configure kdump """ if os.geteuid() != 0: exit("Root privileges are required for this operation") - pass @kdump.command() def disable(): @@ -1779,7 +1773,6 @@ def vrf(ctx): config_db.connect() ctx.obj = {} ctx.obj['config_db'] = config_db - pass @vrf.command('add') @click.argument('vrf_name', metavar='', required=True) @@ -2342,7 +2335,6 @@ def syslog_group(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass @syslog_group.command('add') @click.argument('syslog_ip_address', metavar='', required=True) @@ -2395,7 +2387,6 @@ def ntp(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass @ntp.command('add') @click.argument('ntp_ip_address', metavar='', required=True) @@ -2448,7 +2439,6 @@ def sflow(ctx): config_db = ConfigDBConnector() config_db.connect() ctx.obj = {'db': config_db} - pass # # 'sflow' command ('config sflow enable') diff --git a/config/mlnx.py b/config/mlnx.py index 
330dd41ac3..5ad422d7cf 100644 --- a/config/mlnx.py +++ b/config/mlnx.py @@ -169,7 +169,7 @@ def sniffer_env_variable_set(enable, env_variable_name, env_variable_string=""): def restart_swss(): try: run_command(COMMAND_RESTART_SWSS) - except OSError, e: + except OSError as e: log_error("Not able to restart swss service, %s" % str(e), SNIFFER_SYSLOG_IDENTIFIER, True) return 1 return 0 diff --git a/psuutil/main.py b/psuutil/main.py index 70dd0c44ab..fd0b336436 100644 --- a/psuutil/main.py +++ b/psuutil/main.py @@ -84,7 +84,7 @@ def get_platform_and_hwsku(): stdout = proc.communicate()[0] proc.wait() hwsku = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Cannot detect platform") return (platform, hwsku) @@ -108,14 +108,14 @@ def load_platform_psuutil(): try: module_file = "/".join([platform_path, "plugins", PLATFORM_SPECIFIC_MODULE_NAME + ".py"]) module = imp.load_source(PLATFORM_SPECIFIC_MODULE_NAME, module_file) - except IOError, e: + except IOError as e: log_error("Failed to load platform module '%s': %s" % (PLATFORM_SPECIFIC_MODULE_NAME, str(e)), True) return -1 try: platform_psuutil_class = getattr(module, PLATFORM_SPECIFIC_CLASS_NAME) platform_psuutil = platform_psuutil_class() - except AttributeError, e: + except AttributeError as e: log_error("Failed to instantiate '%s' class: %s" % (PLATFORM_SPECIFIC_CLASS_NAME, str(e)), True) return -2 diff --git a/scripts/boot_part b/scripts/boot_part index 42a9b639ff..5bcae549c2 100755 --- a/scripts/boot_part +++ b/scripts/boot_part @@ -62,12 +62,13 @@ def get_boot_partition(blkdev): ## Parse command output and return the current boot partition index for line in out.splitlines(): m = re.match(r'{0}(\d+) / .*'.format(blkdev), line) - if not m: continue + if not m: + continue index = m.group(1) return int(index) - else: - logger.error('Unexpected /proc/mounts output: %s', out) - return None + + logger.error('Unexpected /proc/mounts output: %s', out) + return None def set_boot_partition(blkdev, 
index): ## Mount the partition diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 314c1407d1..6ab1324091 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -19,7 +19,7 @@ try: import glob from sonic_device_util import get_machine_info from sonic_device_util import get_platform_info -except ImportError, e: +except ImportError as e: raise ImportError (str(e) + "- required module not found") PLATFORM_ROOT = '/usr/share/sonic/device' @@ -63,6 +63,8 @@ def main(): # run(t, opts, args, support_eeprom_db) + return 0 + #------------------------------------------------------------------------------- # # sets global variable "optcfg" @@ -103,7 +105,7 @@ def run(target, opts, args, support_eeprom_db): if not os.path.exists(CACHE_ROOT): try: os.makedirs(CACHE_ROOT) - except: + except OSError: pass if opts.init: for file in glob.glob(os.path.join(CACHE_ROOT, '*')): @@ -115,7 +117,7 @@ def run(target, opts, args, support_eeprom_db): # try: target.set_cache_name(os.path.join(CACHE_ROOT, CACHE_FILE)) - except: + except Exception: pass e = target.read_eeprom() @@ -124,7 +126,7 @@ def run(target, opts, args, support_eeprom_db): try: target.update_cache(e) - except: + except Exception: pass if opts.init: @@ -139,7 +141,7 @@ def run(target, opts, args, support_eeprom_db): elif opts.serial: try: serial = target.serial_number_str(e) - except NotImplemented, e: + except NotImplementedError as e: print e else: print serial or "Undefined." 
diff --git a/scripts/fdbshow b/scripts/fdbshow index 1c06d5a27d..6743caf3cd 100755 --- a/scripts/fdbshow +++ b/scripts/fdbshow @@ -85,7 +85,7 @@ class FdbShow(object): elif 'bvid' in fdb: try: vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) - except: + except Exception: vlan_id = fdb["bvid"] print "Failed to get Vlan id for bvid {}\n".format(fdb["bvid"]) self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,) + (fdb_type,)) diff --git a/scripts/natconfig b/scripts/natconfig index db5ea9b667..d94dc586ac 100644 --- a/scripts/natconfig +++ b/scripts/natconfig @@ -67,9 +67,7 @@ class NatConfig(object): for key,values in static_nat_dict.items(): ip_protocol = "all" - global_ip = "---" global_port = "---" - local_ip = "---" local_port = "---" nat_type = "dnat" twice_nat_id = "---" @@ -106,10 +104,6 @@ class NatConfig(object): return for key,values in static_napt_dict.items(): - ip_protocol = "all" - global_ip = "---" - global_port = "---" - local_ip = "---" local_port = "---" nat_type = "dnat" twice_nat_id = "---" @@ -151,8 +145,6 @@ class NatConfig(object): return for key,values in nat_pool_dict.items(): - pool_name = "---" - global_ip = "---" global_port = "---" if isinstance(key, unicode) is True: @@ -182,8 +174,6 @@ class NatConfig(object): return for key,values in nat_binding_dict.items(): - binding_name = "---" - pool_name = "---" access_list = "---" nat_type = "snat" twice_nat_id = "---" diff --git a/scripts/nbrshow b/scripts/nbrshow index 7438933fc3..3014492ed7 100644 --- a/scripts/nbrshow +++ b/scripts/nbrshow @@ -93,7 +93,7 @@ class NbrBase(object): elif 'bvid' in fdb: try: vlan_id = port_util.get_vlan_id_from_bvid(self.db, fdb["bvid"]) - except: + except Exception: vlan_id = fdb["bvid"] print "Failed to get Vlan id for bvid {}\n".format(fdb["bvid"]) self.bridge_mac_list.append((int(vlan_id),) + (fdb["mac"],) + (if_name,)) diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser index 66213239a3..105f21795b 100644 
--- a/scripts/neighbor_advertiser +++ b/scripts/neighbor_advertiser @@ -266,7 +266,6 @@ def get_vlan_addresses(vlan_interface): mac_addr = keys[1] except Exception: log_error('failed to get %s addresses from o.s.' % vlan_interface) - pass if not mac_addr: mac_addr = get_vlan_interface_mac_address(vlan_interface) diff --git a/scripts/pcmping b/scripts/pcmping index f754c2fda4..2ec42712d6 100755 --- a/scripts/pcmping +++ b/scripts/pcmping @@ -120,7 +120,7 @@ def create_socket(interface, portchannel): if iface == interface: exp_socket = s sockets.append(s) - except: + except Exception: sys.stderr.write("Unable to create socket. Check your permissions\n") sys.exit(1) return sockets, exp_socket diff --git a/scripts/portstat b/scripts/portstat index b628f564d8..8da9b27788 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -109,7 +109,7 @@ class Portstat(object): if speed is None: speed = PORT_RATE else: - speed = int(speed)/1000 + speed = int(speed)//1000 return speed def get_port_state(self, port_name): diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config index d785e59ab8..eaf5ed3834 100755 --- a/scripts/sonic-kdump-config +++ b/scripts/sonic-kdump-config @@ -179,7 +179,7 @@ def get_kdump_memory(): #print(lines[0][p+2:]) #print('XXX') return lines[0][p+2:] - except: + except Exception: pass return "0M-2G:256M,2G-4G:320M,4G-8G:384M,8G-:448M" @@ -194,7 +194,7 @@ def get_kdump_num_dumps(): p = lines[0].find(': ') if p != -1: return int(lines[0][p+2:]) - except: + except Exception: pass return 3 @@ -337,6 +337,7 @@ def cmd_kdump_config_next(verbose): ## Disable kdump # # @param verbose If True, the function will display a few additional information +# @return True if the grub/cmdline cfg has changed, and False if it has not def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file): write_use_kdump(0) @@ -358,6 +359,8 @@ def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file if changed: rewrite_cfg(lines, 
cmdline_file) + return changed + ## Command: Disable kdump # # @param verbose If True, the function will display a few additional information diff --git a/sfputil/main.py b/sfputil/main.py index 89cd5fcfdf..18576ebd50 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -313,7 +313,7 @@ def get_platform_and_hwsku(): stdout = proc.communicate()[0] proc.wait() hwsku = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Cannot detect platform") return (platform, hwsku) @@ -351,14 +351,14 @@ def load_platform_sfputil(): try: module_file = "/".join([platform_path, "plugins", PLATFORM_SPECIFIC_MODULE_NAME + ".py"]) module = imp.load_source(PLATFORM_SPECIFIC_MODULE_NAME, module_file) - except IOError, e: + except IOError as e: log_error("Failed to load platform module '%s': %s" % (PLATFORM_SPECIFIC_MODULE_NAME, str(e)), True) return -1 try: platform_sfputil_class = getattr(module, PLATFORM_SPECIFIC_CLASS_NAME) platform_sfputil = platform_sfputil_class() - except AttributeError, e: + except AttributeError as e: log_error("Failed to instantiate '%s' class: %s" % (PLATFORM_SPECIFIC_CLASS_NAME, str(e)), True) return -2 @@ -386,7 +386,7 @@ def cli(): try: port_config_file_path = get_path_to_port_config_file() platform_sfputil.read_porttab_mappings(port_config_file_path) - except Exception, e: + except Exception as e: log_error("Error reading port info (%s)" % str(e), True) sys.exit(3) diff --git a/show/bgp_frr_v6.py b/show/bgp_frr_v6.py index 9ff2ded3b6..46b20ad2bc 100644 --- a/show/bgp_frr_v6.py +++ b/show/bgp_frr_v6.py @@ -1,5 +1,5 @@ import click -from show.main import * +from show.main import ipv6, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ipv6.group(cls=AliasedGroup, default_if_no_args=False) +@ipv6.group() def bgp(): """Show IPv6 BGP (Border Gateway Protocol) 
information""" pass @@ -22,7 +22,7 @@ def summary(): try: device_output = run_command('sudo vtysh -c "show bgp ipv6 summary"', return_cmd=True) get_bgp_summary_extended(device_output) - except: + except Exception: run_command('sudo vtysh -c "show bgp ipv6 summary"') diff --git a/show/bgp_quagga_v4.py b/show/bgp_quagga_v4.py index 4883880682..73075e5aec 100644 --- a/show/bgp_quagga_v4.py +++ b/show/bgp_quagga_v4.py @@ -1,5 +1,5 @@ import click -from show.main import * +from show.main import ip, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ip.group(cls=AliasedGroup, default_if_no_args=False) +@ip.group() def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" pass @@ -22,7 +22,7 @@ def summary(): try: device_output = run_command('sudo vtysh -c "show ip bgp summary"', return_cmd=True) get_bgp_summary_extended(device_output) - except: + except Exception: run_command('sudo vtysh -c "show ip bgp summary"') diff --git a/show/bgp_quagga_v6.py b/show/bgp_quagga_v6.py index e2afe0f13e..f5e8ceef23 100644 --- a/show/bgp_quagga_v6.py +++ b/show/bgp_quagga_v6.py @@ -1,5 +1,5 @@ import click -from show.main import * +from show.main import ipv6, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ipv6.group(cls=AliasedGroup, default_if_no_args=False) +@ipv6.group() def bgp(): """Show IPv6 BGP (Border Gateway Protocol) information""" pass @@ -22,7 +22,7 @@ def summary(): try: device_output = run_command('sudo vtysh -c "show ipv6 bgp summary"', return_cmd=True) get_bgp_summary_extended(device_output) - except: + except Exception: run_command('sudo vtysh -c "show ipv6 bgp summary"') diff --git a/show/main.py b/show/main.py index 
566874d4ca..6f4267c5ce 100755 --- a/show/main.py +++ b/show/main.py @@ -180,7 +180,7 @@ def get_routing_stack(): proc.wait() result = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Cannot detect routing-stack") return (result) @@ -448,7 +448,7 @@ def get_neighbor_dict_from_table(db,table_name): neighbor_dict[entry] = neighbor_data[entry].get( 'name') if 'name' in neighbor_data[entry].keys() else 'NotAvailable' return neighbor_dict - except: + except Exception: return neighbor_dict @@ -498,7 +498,7 @@ def get_dynamic_neighbor_subnet(db): dynamic_neighbor["v4"] = v4_subnet dynamic_neighbor["v6"] = v6_subnet return dynamic_neighbor - except: + except Exception: return neighbor_data @@ -1398,7 +1398,7 @@ def interfaces(): try: neighbor_name = bgp_peer[local_ip][0] neighbor_ip = bgp_peer[local_ip][1] - except: + except Exception: pass if len(ifaddresses) > 0: @@ -1538,7 +1538,7 @@ def interfaces(): try: neighbor_name = bgp_peer[local_ip][0] neighbor_ip = bgp_peer[local_ip][1] - except: + except Exception: pass if len(ifaddresses) > 0: @@ -2183,7 +2183,6 @@ def brief(verbose): vlan_dhcp_helper_dict[str(key.strip('Vlan'))] = vlan_dhcp_helper_data[key]['dhcp_servers'] except KeyError: vlan_dhcp_helper_dict[str(key.strip('Vlan'))] = " " - pass # Parsing VLAN Gateway info for key in natsorted(vlan_ip_data.keys()): @@ -2868,7 +2867,6 @@ def ztp(status, verbose): if os.geteuid() != 0: exit("Root privileges are required for this operation") - pass cmd = "ztp status" if verbose: diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 32be097dd0..2f8e81032c 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -387,7 +387,7 @@ def install(url, force, skip_migration=False): validate_url_or_abort(url) try: urllib.urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook) - except Exception, e: + except Exception as e: click.echo("Download error", e) raise click.Abort() image_path = DEFAULT_IMAGE_PATH @@ -553,7 +553,7 @@ def 
upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): validate_url_or_abort(url) try: urllib.urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook) - except Exception, e: + except Exception as e: click.echo("Download error", e) raise click.Abort() image_path = DEFAULT_IMAGE_PATH diff --git a/ssdutil/main.py b/ssdutil/main.py index c73fa147dc..7accba63a5 100755 --- a/ssdutil/main.py +++ b/ssdutil/main.py @@ -64,7 +64,7 @@ def get_platform_and_hwsku(): stdout = proc.communicate()[0] proc.wait() hwsku = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Cannot detect platform") return (platform, hwsku) diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py index 8ac7286391..24c7131d35 100644 --- a/utilities_common/util_base.py +++ b/utilities_common/util_base.py @@ -7,7 +7,7 @@ import os import sys import syslog -except ImportError, e: +except ImportError as e: raise ImportError (str(e) + " - required module not found") # @@ -89,7 +89,7 @@ def get_platform_and_hwsku(self): stdout = proc.communicate()[0] proc.wait() hwsku = stdout.rstrip('\n') - except OSError, e: + except OSError as e: raise OSError("Failed to detect platform: %s" % (str(e))) return (platform, hwsku) @@ -130,7 +130,7 @@ def load_platform_util(self, module_name, class_name): try: module_file = "/".join([platform_path, "plugins", module_name + ".py"]) module = imp.load_source(module_name, module_file) - except IOError, e: + except IOError as e: raise IOError("Failed to load platform module '%s': %s" % (module_name, str(e))) try: @@ -140,7 +140,7 @@ def load_platform_util(self, module_name, class_name): platform_util = platform_util_class('','','','') else: platform_util = platform_util_class() - except AttributeError, e: + except AttributeError as e: raise AttributeError("Failed to instantiate '%s' class: %s" % (class_name, str(e))) return platform_util From 86cb844f5860d0003b2df72d7053285447c39e2e Mon Sep 17 00:00:00 2001 From: Joe LeVeque 
Date: Tue, 7 Apr 2020 22:31:50 -0700 Subject: [PATCH 037/111] Fix more Python warnings (#869) * Fix "First parameter of a method is not named 'self'" warnings * Fix "Module is imported with 'import' and 'import from'" warning * Fix 'Testing equality to None' warnings * Clean up 'Unused import' warnings --- config/main.py | 1 - config/mlnx.py | 4 ---- config/nat.py | 2 -- connect/main.py | 3 --- consutil/lib.py | 1 - consutil/main.py | 2 -- crm/main.py | 1 - debug/main.py | 2 -- pddf_fanutil/main.py | 7 ------- pddf_ledutil/main.py | 8 -------- pddf_psuutil/main.py | 7 ------- pddf_thermalutil/main.py | 7 ------- pfc/main.py | 3 +-- psuutil/main.py | 2 -- scripts/aclshow | 3 --- scripts/configlet | 4 ---- scripts/db_migrator.py | 3 --- scripts/decode-syseeprom | 8 +------- scripts/dump_nat_entries.py | 1 - scripts/ecnconfig | 1 - scripts/fanshow | 2 -- scripts/fdbclear | 4 +--- scripts/intfstat | 4 ---- scripts/natclear | 3 --- scripts/natconfig | 2 -- scripts/natshow | 1 - scripts/neighbor_advertiser | 2 -- scripts/pfcstat | 3 --- scripts/portconfig | 2 -- scripts/portstat | 3 --- scripts/queuestat | 2 -- scripts/sfpshow | 4 +--- scripts/sonic-kdump-config | 14 ++++++-------- scripts/teamshow | 3 --- scripts/tempershow | 2 -- scripts/update_json.py | 1 - scripts/watermarkstat | 1 - sfputil/main.py | 2 -- show/main.py | 5 ++--- show/mlnx.py | 2 -- sonic-utilities-tests/drops_group_test.py | 3 --- sonic-utilities-tests/intfstat_test.py | 4 ---- sonic-utilities-tests/psu_test.py | 1 + sonic-utilities-tests/sfp_test.py | 1 + sonic_installer/main.py | 2 -- undebug/main.py | 2 -- utilities_common/util_base.py | 2 -- 47 files changed, 14 insertions(+), 133 deletions(-) diff --git a/config/main.py b/config/main.py index e8f89a19dd..025346e3a6 100755 --- a/config/main.py +++ b/config/main.py @@ -3,7 +3,6 @@ import sys import os import click -import json import subprocess import netaddr import re diff --git a/config/mlnx.py b/config/mlnx.py index 5ad422d7cf..9d7810a6a9 
100644 --- a/config/mlnx.py +++ b/config/mlnx.py @@ -10,12 +10,8 @@ import os import subprocess import click - import imp import syslog - import types - import traceback import time - from tabulate import tabulate except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/config/nat.py b/config/nat.py index 762252eea8..b56c9d7e52 100644 --- a/config/nat.py +++ b/config/nat.py @@ -1,8 +1,6 @@ #!/usr/bin/env python import click -import socket -import netaddr import ipaddress from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector diff --git a/connect/main.py b/connect/main.py index 9fb7d782bd..b34ee11aab 100755 --- a/connect/main.py +++ b/connect/main.py @@ -1,11 +1,8 @@ #! /usr/bin/python -u import click -import errno import os import pexpect -import subprocess -import sys from click_default_group import DefaultGroup try: diff --git a/consutil/lib.py b/consutil/lib.py index 1955c7763d..0ffa88691d 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -8,7 +8,6 @@ try: import click import re - import swsssdk import subprocess import sys except ImportError as e: diff --git a/consutil/main.py b/consutil/main.py index de178ea9a9..c9a914d478 100644 --- a/consutil/main.py +++ b/consutil/main.py @@ -9,8 +9,6 @@ import click import os import pexpect - import re - import subprocess import sys from tabulate import tabulate from lib import * diff --git a/crm/main.py b/crm/main.py index 241362ed13..223029d721 100644 --- a/crm/main.py +++ b/crm/main.py @@ -1,6 +1,5 @@ #!/usr/bin/env python -import os import click import swsssdk from tabulate import tabulate diff --git a/debug/main.py b/debug/main.py index 956c72404c..b72394b22d 100755 --- a/debug/main.py +++ b/debug/main.py @@ -2,10 +2,8 @@ # date: 07/12/17 import click -import os import subprocess from click_default_group import DefaultGroup -from pprint import pprint def run_command(command, pager=False): click.echo(click.style("Command: ", fg='cyan') + 
click.style(command, fg='green')) diff --git a/pddf_fanutil/main.py b/pddf_fanutil/main.py index 68db948e63..c34ef43185 100644 --- a/pddf_fanutil/main.py +++ b/pddf_fanutil/main.py @@ -8,15 +8,8 @@ try: import sys import os - import subprocess import click - import imp - import syslog - import types - import traceback from tabulate import tabulate - from utilities_common import util_base - from utilities_common.util_base import UtilLogger from utilities_common.util_base import UtilHelper except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/pddf_ledutil/main.py b/pddf_ledutil/main.py index df471ec8ef..217781abf0 100644 --- a/pddf_ledutil/main.py +++ b/pddf_ledutil/main.py @@ -8,15 +8,7 @@ try: import sys import os - import subprocess import click - import imp - import syslog - import types - import traceback - from tabulate import tabulate - from utilities_common import util_base - from utilities_common.util_base import UtilLogger from utilities_common.util_base import UtilHelper except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/pddf_psuutil/main.py b/pddf_psuutil/main.py index a034f34a08..14778235fe 100644 --- a/pddf_psuutil/main.py +++ b/pddf_psuutil/main.py @@ -8,15 +8,8 @@ try: import sys import os - import subprocess import click - import imp - import syslog - import types - import traceback from tabulate import tabulate - from utilities_common import util_base - from utilities_common.util_base import UtilLogger from utilities_common.util_base import UtilHelper except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/pddf_thermalutil/main.py b/pddf_thermalutil/main.py index be91273a7b..34314ce611 100644 --- a/pddf_thermalutil/main.py +++ b/pddf_thermalutil/main.py @@ -8,15 +8,8 @@ try: import sys import os - import subprocess import click - import imp - import syslog - import types - import traceback from tabulate import 
tabulate - from utilities_common import util_base - from utilities_common.util_base import UtilLogger from utilities_common.util_base import UtilHelper except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/pfc/main.py b/pfc/main.py index 52ac4ed122..9da2147070 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -1,6 +1,5 @@ #!/usr/bin/env python -import os import click import swsssdk from tabulate import tabulate @@ -160,4 +159,4 @@ def showPrio(interface): config.add_command(configAsym, "asymmetric") config.add_command(configPrio, "priority") show.add_command(showAsym, "asymmetric") -show.add_command(showPrio, "priority") \ No newline at end of file +show.add_command(showPrio, "priority") diff --git a/psuutil/main.py b/psuutil/main.py index fd0b336436..3bc09bf426 100644 --- a/psuutil/main.py +++ b/psuutil/main.py @@ -12,8 +12,6 @@ import click import imp import syslog - import types - import traceback from tabulate import tabulate except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/scripts/aclshow b/scripts/aclshow index a574a2325f..2c8beeedab 100755 --- a/scripts/aclshow +++ b/scripts/aclshow @@ -21,13 +21,10 @@ from __future__ import print_function import argparse import json import os -import re -import subprocess import swsssdk import sys from tabulate import tabulate -from natsort import natsorted ### temp file to save counter positions when doing clear counter action. 
### if we could have a SAI command to clear counters will be better, so no need to maintain diff --git a/scripts/configlet b/scripts/configlet index 18ee933294..9837a0362e 100755 --- a/scripts/configlet +++ b/scripts/configlet @@ -76,13 +76,9 @@ A sample for update: """ from __future__ import print_function -import sys -import os.path import argparse import json import time -from collections import OrderedDict -from natsort import natsorted from swsssdk import ConfigDBConnector test_only = False diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index b610a35912..8d61c29203 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -6,9 +6,6 @@ import syslog from swsssdk import ConfigDBConnector import sonic_device_util -import os -import subprocess -import json SYSLOG_IDENTIFIER = 'db_migrator' diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 6ab1324091..e687f4fb68 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -5,17 +5,11 @@ # This is the main script that handles eeprom encoding and decoding # try: - import exceptions - import binascii - import time import optparse import warnings import os - import subprocess import sys - from array import array import imp - from sonic_eeprom import eeprom_dts import glob from sonic_device_util import get_machine_info from sonic_device_util import get_platform_info @@ -55,7 +49,7 @@ def main(): # Currently, don't support eeprom db on Arista platform platforms_without_eeprom_db = ['arista', 'kvm'] if any(platform in platform_path for platform in platforms_without_eeprom_db)\ - or getattr(t, 'read_eeprom_db', None) == None: + or getattr(t, 'read_eeprom_db', None) is None: support_eeprom_db = False # diff --git a/scripts/dump_nat_entries.py b/scripts/dump_nat_entries.py index 0bd1baf155..5535394907 100644 --- a/scripts/dump_nat_entries.py +++ b/scripts/dump_nat_entries.py @@ -5,7 +5,6 @@ so as to restore them during warm reboot """ -import sys import subprocess def 
main(): diff --git a/scripts/ecnconfig b/scripts/ecnconfig index 607b983ec6..899e6148df 100755 --- a/scripts/ecnconfig +++ b/scripts/ecnconfig @@ -50,7 +50,6 @@ from __future__ import print_function import os import sys -import json import argparse import swsssdk from tabulate import tabulate diff --git a/scripts/fanshow b/scripts/fanshow index 81d0a9e2d8..ea6ff3dbd5 100644 --- a/scripts/fanshow +++ b/scripts/fanshow @@ -4,8 +4,6 @@ """ from __future__ import print_function -import argparse - from tabulate import tabulate from swsssdk import SonicV2Connector from natsort import natsorted diff --git a/scripts/fdbclear b/scripts/fdbclear index a8100af2cb..de5d380bef 100644 --- a/scripts/fdbclear +++ b/scripts/fdbclear @@ -15,9 +15,7 @@ import argparse import json import sys -from natsort import natsorted -from swsssdk import SonicV2Connector, port_util -from tabulate import tabulate +from swsssdk import SonicV2Connector class FdbClear(object): diff --git a/scripts/intfstat b/scripts/intfstat index 9341658b50..c010bbc374 100755 --- a/scripts/intfstat +++ b/scripts/intfstat @@ -9,12 +9,8 @@ import argparse import cPickle as pickle import datetime -import getopt import sys import os -import json -import re -import subprocess import swsssdk import sys import time diff --git a/scripts/natclear b/scripts/natclear index be4b2060e3..7883c8fd65 100644 --- a/scripts/natclear +++ b/scripts/natclear @@ -12,11 +12,8 @@ import argparse import json import sys -import subprocess -from natsort import natsorted from swsssdk import SonicV2Connector -from tabulate import tabulate class NatClear(object): diff --git a/scripts/natconfig b/scripts/natconfig index d94dc586ac..b9f7869e55 100644 --- a/scripts/natconfig +++ b/scripts/natconfig @@ -39,10 +39,8 @@ """ import argparse -import json import sys -from natsort import natsorted from tabulate import tabulate from swsssdk import ConfigDBConnector diff --git a/scripts/natshow b/scripts/natshow index 3d810d6aae..64247501e2 100644 --- 
a/scripts/natshow +++ b/scripts/natshow @@ -61,7 +61,6 @@ import json import sys import re -from natsort import natsorted from swsssdk import SonicV2Connector from tabulate import tabulate diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser index 105f21795b..d6f280e7c5 100644 --- a/scripts/neighbor_advertiser +++ b/scripts/neighbor_advertiser @@ -17,9 +17,7 @@ import traceback import subprocess import time import warnings -import sonic_device_util from swsssdk import ConfigDBConnector -from swsssdk import SonicV2Connector from netaddr import IPAddress, IPNetwork diff --git a/scripts/pfcstat b/scripts/pfcstat index 0eb135d798..3afcb34f72 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -11,10 +11,7 @@ import sys import argparse import cPickle as pickle import datetime -import getopt -import json import os.path -import time from collections import namedtuple, OrderedDict from natsort import natsorted diff --git a/scripts/portconfig b/scripts/portconfig index 07b4828a29..6b21739d95 100755 --- a/scripts/portconfig +++ b/scripts/portconfig @@ -17,9 +17,7 @@ optional arguments: """ from __future__ import print_function -import os import sys -import json import argparse import swsssdk diff --git a/scripts/portstat b/scripts/portstat index 8da9b27788..29cdbd0c62 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -9,10 +9,7 @@ import argparse import cPickle as pickle import datetime -import getopt import os.path -import re -import subprocess import swsssdk import sys import time diff --git a/scripts/queuestat b/scripts/queuestat index b93251ac8d..f6e9d2b466 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -9,8 +9,6 @@ import argparse import cPickle as pickle import datetime -import getopt -import json import os.path import swsssdk import sys diff --git a/scripts/sfpshow b/scripts/sfpshow index 19babf4ccc..01970b3191 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -3,8 +3,6 @@ Script to show sfp eeprom and presence status. 
Not like sfputil this scripts get the sfp data from DB directly. """ -import argparse -import json import sys import click import re @@ -12,7 +10,7 @@ import operator import os from natsort import natsorted -from swsssdk import SonicV2Connector, port_util +from swsssdk import SonicV2Connector from tabulate import tabulate # Mock the redis for unit test purposes # diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config index eaf5ed3834..640dd5d9b5 100755 --- a/scripts/sonic-kdump-config +++ b/scripts/sonic-kdump-config @@ -19,10 +19,8 @@ limitations under the License. import sys import argparse import shlex -from argparse import RawTextHelpFormatter import os import subprocess -import errno from swsssdk import ConfigDBConnector aboot_cfg_template ="/host/image-%s/kernel-cmdline" @@ -280,7 +278,7 @@ def kdump_enable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file) crash_kernel_mem = search_for_crash_kernel(lines[img_index]) if verbose: print("crash_kernel_mem=[%s]" % crash_kernel_mem) - if crash_kernel_mem == None: + if crash_kernel_mem is None: lines[img_index] += " crashkernel=%s" % memory changed = True if verbose: @@ -348,7 +346,7 @@ def kdump_disable(verbose, kdump_enabled, memory, num_dumps, image, cmdline_file changed = False crash_kernel_mem = search_for_crash_kernel(lines[img_index]) - if crash_kernel_mem == None: + if crash_kernel_mem is None: print("kdump is already disabled") else: lines[img_index] = lines[img_index].replace("crashkernel="+crash_kernel_mem, "") @@ -388,7 +386,7 @@ def cmd_kdump_disable(verbose, image=get_current_image()): # @param memory If not None, new value to set. 
# If None, display current value read from running configuration def cmd_kdump_memory(verbose, memory): - if memory == None: + if memory is None: (rc, lines, err_str) = run_command("/usr/bin/show kdump memory", use_shell=False); print('\n'.join(lines)) else: @@ -405,7 +403,7 @@ def cmd_kdump_memory(verbose, memory): # @param memory If not None, new value to set. # If None, display current value read from running configuration def cmd_kdump_num_dumps(verbose, num_dumps): - if num_dumps == None: + if num_dumps is None: (rc, lines, err_str) = run_command("/usr/bin/show kdump num_dumps", use_shell=False); print('\n'.join(lines)) else: @@ -484,7 +482,7 @@ def cmd_kdump_file(num_lines, filename): if x.find(filename) != -1: fname = x break - if fname == None: + if fname is None: print("Invalid key") sys.exit(1) (rc, lines, err_str) = run_command("/usr/bin/tail -n %d %s" % (num_lines, fname), use_shell=False); @@ -500,7 +498,7 @@ def main(): # Add allowed arguments parser = argparse.ArgumentParser(description="kdump configuration and status tool", - formatter_class=RawTextHelpFormatter) + formatter_class=argparse.RawTextHelpFormatter) # Enable kdump on Current image parser.add_argument('--enable', action='store_true', diff --git a/scripts/teamshow b/scripts/teamshow index fa203c8259..abb4808753 100755 --- a/scripts/teamshow +++ b/scripts/teamshow @@ -21,14 +21,11 @@ import json import os -import re import swsssdk import subprocess import sys from tabulate import tabulate from natsort import natsorted -from sonic_device_util import get_machine_info -from sonic_device_util import get_platform_info PORT_CHANNEL_APPL_TABLE_PREFIX = "LAG_TABLE:" PORT_CHANNEL_CFG_TABLE_PREFIX = "PORTCHANNEL|" diff --git a/scripts/tempershow b/scripts/tempershow index aabc4943ed..23f6da2565 100644 --- a/scripts/tempershow +++ b/scripts/tempershow @@ -4,8 +4,6 @@ """ from __future__ import print_function -import argparse - from tabulate import tabulate from swsssdk import SonicV2Connector from 
natsort import natsorted diff --git a/scripts/update_json.py b/scripts/update_json.py index a42603e8fe..11ea7b8982 100755 --- a/scripts/update_json.py +++ b/scripts/update_json.py @@ -1,7 +1,6 @@ #! /usr/bin/env python import os -import sys import json import argparse diff --git a/scripts/watermarkstat b/scripts/watermarkstat index 9b446248da..8b567ea3b5 100644 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -7,7 +7,6 @@ ##################################################################### import argparse -import getopt import json import sys import swsssdk diff --git a/sfputil/main.py b/sfputil/main.py index 18576ebd50..654cb8d380 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -12,8 +12,6 @@ import click import imp import syslog - import types - import traceback from tabulate import tabulate except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/show/main.py b/show/main.py index 6f4267c5ce..c4c0d4e493 100755 --- a/show/main.py +++ b/show/main.py @@ -1,6 +1,5 @@ #! 
/usr/bin/python -u -import errno import json import netaddr import netifaces @@ -2147,7 +2146,7 @@ def files(): @click.argument('lines', metavar='', required=False) def log(record, lines): """Show kdump kernel core dump file kernel log""" - if lines == None: + if lines is None: run_command("sonic-kdump-config --file %s" % record) else: run_command("sonic-kdump-config --file %s --lines %s" % (record, lines)) @@ -2270,7 +2269,7 @@ def tablelize(keys, data): entry = config_db.get_entry('VLAN_MEMBER', (k, m)) mode = entry.get('tagging_mode') - if mode == None: + if mode is None: r.append('?') else: r.append(mode) diff --git a/show/mlnx.py b/show/mlnx.py index 9ae19002eb..6b0d304a27 100644 --- a/show/mlnx.py +++ b/show/mlnx.py @@ -9,8 +9,6 @@ import sys import subprocess import click - import sonic_device_util - from swsssdk import ConfigDBConnector import xml.etree.ElementTree as ET except ImportError as e: raise ImportError("%s - required module not found" % str(e)) diff --git a/sonic-utilities-tests/drops_group_test.py b/sonic-utilities-tests/drops_group_test.py index 6a9e71099c..df3c6c4f93 100644 --- a/sonic-utilities-tests/drops_group_test.py +++ b/sonic-utilities-tests/drops_group_test.py @@ -1,8 +1,5 @@ import sys import os -import pytest -import click -import swsssdk from click.testing import CliRunner test_path = os.path.dirname(os.path.abspath(__file__)) diff --git a/sonic-utilities-tests/intfstat_test.py b/sonic-utilities-tests/intfstat_test.py index a3bac6c2cd..f1d11e4f84 100644 --- a/sonic-utilities-tests/intfstat_test.py +++ b/sonic-utilities-tests/intfstat_test.py @@ -1,8 +1,5 @@ import sys import os -import pytest -import click -import swsssdk from click.testing import CliRunner test_path = os.path.dirname(os.path.abspath(__file__)) @@ -12,7 +9,6 @@ sys.path.insert(0, modules_path) import mock_tables.dbconnector - import show.main as show import clear.main as clear diff --git a/sonic-utilities-tests/psu_test.py b/sonic-utilities-tests/psu_test.py index 
962b6dbcb9..2c8c7e2661 100644 --- a/sonic-utilities-tests/psu_test.py +++ b/sonic-utilities-tests/psu_test.py @@ -42,6 +42,7 @@ def test_single_psu(self): """ assert result.output == expected + @classmethod def teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) diff --git a/sonic-utilities-tests/sfp_test.py b/sonic-utilities-tests/sfp_test.py index f1cdda6eef..695258d60b 100644 --- a/sonic-utilities-tests/sfp_test.py +++ b/sonic-utilities-tests/sfp_test.py @@ -113,6 +113,7 @@ def test_sfp_eeprom(self): expected = "Ethernet200: SFP EEPROM Not detected" assert result_lines == expected + @classmethod def teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 2f8e81032c..1ea71cbd81 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -3,14 +3,12 @@ import os import re import signal -import stat import sys import time import click import urllib import syslog import subprocess -from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector import collections import platform diff --git a/undebug/main.py b/undebug/main.py index 55f62ebb3f..a148445f73 100644 --- a/undebug/main.py +++ b/undebug/main.py @@ -2,10 +2,8 @@ # date: 07/12/17 import click -import os import subprocess from click_default_group import DefaultGroup -from pprint import pprint def run_command(command, pager=False): click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) diff --git a/utilities_common/util_base.py b/utilities_common/util_base.py index 24c7131d35..2659824982 100644 --- a/utilities_common/util_base.py +++ b/utilities_common/util_base.py @@ -2,10 +2,8 @@ try: import imp - import signal import subprocess import os - import sys import syslog except ImportError as e: raise ImportError (str(e) + " - required module not found") From 
42ef46a602340124bd90ed6899f0e30295f37924 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Wed, 8 Apr 2020 22:07:20 +0300 Subject: [PATCH 038/111] [showtech]: dump docker stats (#864) * Add docker to 'show techsupport' command Signed-off-by: Shlomi Bitton --- scripts/generate_dump | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 132d78f3c5..105247e023 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -331,6 +331,7 @@ main() { save_cmd "show platform summary" "platform.summary" save_cmd "show platform syseeprom" "platform.syseeprom" save_cmd "cat /host/machine.conf" "machine.conf" + save_cmd "docker stats --no-stream" "docker.stats" save_cmd "sensors" "sensors" save_cmd "show platform psustatus" "platform.psustatus" From 792088236a2448107fffd70b13b78583ce46330d Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Thu, 9 Apr 2020 01:03:09 -0700 Subject: [PATCH 039/111] Fix even more Python warnings (#873) --- clear/bgp_frr_v6.py | 11 ++++------- clear/bgp_quagga_v4.py | 11 ++++------- clear/bgp_quagga_v6.py | 11 ++++------- config/main.py | 1 - config/nat.py | 5 ----- fwutil/lib.py | 8 +------- pddf_fanutil/main.py | 2 -- pddf_psuutil/main.py | 3 --- pddf_thermalutil/main.py | 1 - psuutil/main.py | 1 - scripts/aclshow | 4 ---- scripts/ecnconfig | 2 +- scripts/intfstat | 1 - scripts/intfutil | 3 +-- scripts/natshow | 6 ------ scripts/nbrshow | 1 - scripts/neighbor_advertiser | 2 +- scripts/pfcstat | 2 -- scripts/queuestat | 4 ---- scripts/sonic-kdump-config | 1 - scripts/teamshow | 2 +- sfputil/main.py | 3 --- show/main.py | 10 ++-------- sonic_installer/main.py | 7 +++---- 24 files changed, 22 insertions(+), 80 deletions(-) diff --git a/clear/bgp_frr_v6.py b/clear/bgp_frr_v6.py index c8c3451086..58c50e28d9 100644 --- a/clear/bgp_frr_v6.py +++ b/clear/bgp_frr_v6.py @@ -1,5 +1,5 @@ import click -from clear.main import * +from clear.main import ipv6, 
run_command ############################################################################### @@ -9,8 +9,7 @@ ############################################################################### -@ipv6.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@ipv6.group() def bgp(): """Clear IPv6 BGP (Border Gateway Protocol) information""" pass @@ -24,8 +23,7 @@ def default(): run_command(command) -@bgp.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@bgp.group() def neighbor(): """Clear specific BGP peers""" pass @@ -69,8 +67,7 @@ def neigh_out(ipaddress): run_command(command) -@neighbor.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@neighbor.group() def soft(): """Soft reconfig BGP's inbound/outbound updates""" pass diff --git a/clear/bgp_quagga_v4.py b/clear/bgp_quagga_v4.py index de2b998dfc..56675953ca 100644 --- a/clear/bgp_quagga_v4.py +++ b/clear/bgp_quagga_v4.py @@ -1,5 +1,5 @@ import click -from clear.main import * +from clear.main import ip, run_command ############################################################################### @@ -9,8 +9,7 @@ ############################################################################### -@ip.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@ip.group() def bgp(): """Clear BGP (Border Gateway Protocol) peers""" pass @@ -24,8 +23,7 @@ def default(): run_command(command) -@bgp.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@bgp.group() def neighbor(): """Clear specific BGP peers""" pass @@ -69,8 +67,7 @@ def neigh_out(ipaddress): run_command(command) -@neighbor.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@neighbor.group() def soft(): """Soft reconfig BGP's inbound/outbound updates""" pass diff --git a/clear/bgp_quagga_v6.py b/clear/bgp_quagga_v6.py index 6fb6d2ad38..ad6758d2af 100644 --- 
a/clear/bgp_quagga_v6.py +++ b/clear/bgp_quagga_v6.py @@ -1,5 +1,5 @@ import click -from clear.main import * +from clear.main import ipv6, run_command ############################################################################### @@ -9,8 +9,7 @@ ############################################################################### -@ipv6.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@ipv6.group() def bgp(): """Clear IPv6 BGP (Border Gateway Protocol) information""" pass @@ -24,8 +23,7 @@ def default(): run_command(command) -@bgp.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@bgp.group() def neighbor(): """Clear specific BGP peers""" pass @@ -69,8 +67,7 @@ def neigh_out(ipaddress): run_command(command) -@neighbor.group(cls=AliasedGroup, default_if_no_args=True, - context_settings=CONTEXT_SETTINGS) +@neighbor.group() def soft(): """Soft reconfig BGP's inbound/outbound updates""" pass diff --git a/config/main.py b/config/main.py index 025346e3a6..1d196826d7 100755 --- a/config/main.py +++ b/config/main.py @@ -2297,7 +2297,6 @@ def ztp(): if os.geteuid() != 0: exit("Root privileges are required for this operation") - pass @ztp.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, diff --git a/config/nat.py b/config/nat.py index b56c9d7e52..e6c31c0b67 100644 --- a/config/nat.py +++ b/config/nat.py @@ -66,7 +66,6 @@ def isIpOverlappingWithAnyStaticEntry(ipAddress, table): for key,values in static_dict.items(): global_ip = "---" - local_ip = "---" nat_type = "dnat" if table == 'STATIC_NAPT': @@ -542,8 +541,6 @@ def remove_tcp(ctx, global_ip, global_port, local_ip, local_port): entryFound = False table = "STATIC_NAPT" key = "{}|TCP|{}".format(global_ip, global_port) - dataKey1 = 'local_ip' - dataKey2 = 'local_port' data = config_db.get_entry(table, key) if data: @@ -665,8 +662,6 @@ def add_pool(ctx, pool_name, global_ip_range, global_port_range): else: if 
is_valid_port_address(port_address[0]) is False: ctx.fail("Given port value {} is invalid. Please enter a valid port value !!".format(port_address[0])) - portLowLimit = int(port_address[0]) - portHighLimit = int(port_address[0]) else: global_port_range = "NULL" diff --git a/fwutil/lib.py b/fwutil/lib.py index 156fe47516..897ead6cc8 100755 --- a/fwutil/lib.py +++ b/fwutil/lib.py @@ -106,8 +106,6 @@ def is_url(self): return False def retrieve(self): - filename, headers = None, None - self.__validate() result = urlparse(self.__url) @@ -126,7 +124,7 @@ def retrieve(self): self.DOWNLOAD_PATH_TEMPLATE.format(basename), self.__reporthook ) - except: + except Exception: if os.path.exists(self.DOWNLOAD_PATH_TEMPLATE.format(basename)): os.remove(self.DOWNLOAD_PATH_TEMPLATE.format(basename)) raise @@ -560,7 +558,6 @@ def get_status(self, force): firmware_path = NA firmware_version_current = chassis_component.get_firmware_version() - firmware_version_available = NA firmware_version = firmware_version_current status = self.FW_STATUS_UP_TO_DATE @@ -608,7 +605,6 @@ def get_status(self, force): firmware_path = NA firmware_version_current = module_component.get_firmware_version() - firmware_version_available = NA firmware_version = firmware_version_current status = self.FW_STATUS_UP_TO_DATE @@ -662,7 +658,6 @@ def update_firmware(self, force): ) firmware_version_current = chassis_component.get_firmware_version() - firmware_version_available = NA status = self.FW_STATUS_UP_TO_DATE @@ -724,7 +719,6 @@ def update_firmware(self, force): ) firmware_version_current = module_component.get_firmware_version() - firmware_version_available = NA status = self.FW_STATUS_UP_TO_DATE diff --git a/pddf_fanutil/main.py b/pddf_fanutil/main.py index c34ef43185..5c081f0124 100644 --- a/pddf_fanutil/main.py +++ b/pddf_fanutil/main.py @@ -110,7 +110,6 @@ def direction(index): status_table = [] for fan in fan_ids: - msg = "" fan_name = "FAN {}".format(fan) if fan not in supported_fan: click.echo("Error! 
The {} is not available on the platform.\n" \ @@ -138,7 +137,6 @@ def getspeed(index): status_table = [] for fan in fan_ids: - msg = "" fan_name = "FAN {}".format(fan) if fan not in supported_fan: click.echo("Error! The {} is not available on the platform.\n" \ diff --git a/pddf_psuutil/main.py b/pddf_psuutil/main.py index 14778235fe..0209d7592c 100644 --- a/pddf_psuutil/main.py +++ b/pddf_psuutil/main.py @@ -104,14 +104,12 @@ def mfrinfo(index): """Display PSU manufacturer info""" supported_psu = range(1, platform_psuutil.get_num_psus() + 1) psu_ids = [] - info = "" if (index < 0): psu_ids = supported_psu else: psu_ids = [index] for psu in psu_ids: - msg = "" psu_name = "PSU {}".format(psu) if psu not in supported_psu: click.echo("Error! The {} is not available on the platform.\n" \ @@ -145,7 +143,6 @@ def seninfo(index): psu_ids = [index] for psu in psu_ids: - msg = "" psu_name = "PSU {}".format(psu) if psu not in supported_psu: click.echo("Error! The {} is not available on the platform.\n" \ diff --git a/pddf_thermalutil/main.py b/pddf_thermalutil/main.py index 34314ce611..077d30b08d 100644 --- a/pddf_thermalutil/main.py +++ b/pddf_thermalutil/main.py @@ -81,7 +81,6 @@ def gettemp(index): status_table = [] for thermal in thermal_ids: - msg = "" thermal_name = "TEMP{}".format(thermal) if thermal not in supported_thermal: click.echo("Error! 
The {} is not available on the platform.\n" \ diff --git a/psuutil/main.py b/psuutil/main.py index 3bc09bf426..180cbecfa7 100644 --- a/psuutil/main.py +++ b/psuutil/main.py @@ -101,7 +101,6 @@ def load_platform_psuutil(): platform_path = "/".join([PLATFORM_ROOT_PATH, platform]) else: platform_path = PLATFORM_ROOT_PATH_DOCKER - hwsku_path = "/".join([platform_path, hwsku]) try: module_file = "/".join([platform_path, "plugins", PLATFORM_SPECIFIC_MODULE_NAME + ".py"]) diff --git a/scripts/aclshow b/scripts/aclshow index 2c8beeedab..acfa2e12a8 100755 --- a/scripts/aclshow +++ b/scripts/aclshow @@ -92,9 +92,6 @@ class AclStat(object): read redis database for acl counters """ - def qstrip(string): - return string.strip().strip(" \"").rstrip("\"") - def lowercase_keys(dictionary): return dict((k.lower(), v) for k,v in dictionary.iteritems()) if dictionary else None @@ -128,7 +125,6 @@ class AclStat(object): """ Get ACL counters from the DB """ - counters_cnt = len(self.acl_rules) # num of counters should be the same as rules for table, rule in self.acl_rules.keys(): cnt_props = lowercase_keys(self.db.get_all(self.db.COUNTERS_DB, "COUNTERS:%s:%s" % (table, rule))) self.acl_counters[table, rule] = cnt_props diff --git a/scripts/ecnconfig b/scripts/ecnconfig index 899e6148df..f9adad1d4e 100755 --- a/scripts/ecnconfig +++ b/scripts/ecnconfig @@ -265,7 +265,7 @@ def main(): # get current configuration data wred_profile_data = prof_cfg.get_profile_data(args.profile) - if wred_profile_data == None: + if wred_profile_data is None: raise Exception("Input arguments error. 
Invalid WRED profile %s" % (args.profile)) if args.green_max: diff --git a/scripts/intfstat b/scripts/intfstat index c010bbc374..86e628f8e5 100755 --- a/scripts/intfstat +++ b/scripts/intfstat @@ -312,7 +312,6 @@ def main(): sys.exit(0) if wait_time_in_seconds == 0: - cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file): try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'r')) diff --git a/scripts/intfutil b/scripts/intfutil index 1794399890..bdee791122 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -220,8 +220,7 @@ def get_portchannel_list(get_raw_po_int_configdb_info): portchannel = po[0] if portchannel not in portchannel_list: portchannel_list.append(portchannel) - portchannel = portchannel_list.sort() - return portchannel_list + return natsorted(portchannel_list) def create_po_int_tuple_list(get_raw_po_int_configdb_info): """ diff --git a/scripts/natshow b/scripts/natshow index 64247501e2..ed99b001e7 100644 --- a/scripts/natshow +++ b/scripts/natshow @@ -132,8 +132,6 @@ class NatShow(object): continue ip_protocol = "all" - source = "---" - destination = "---" translated_dst = "---" translated_src = "---" @@ -280,8 +278,6 @@ class NatShow(object): nat_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TWICE_TABLE:{}'.format(nat_twice_entry)) ip_protocol = "all" - source = "---" - destination = "---" source = nat_twice_keys[0] destination = nat_twice_keys[1] @@ -306,8 +302,6 @@ class NatShow(object): napt_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TWICE_TABLE:{}'.format(napt_twice_entry)) ip_protocol = napt_twice_keys[0] - source = "---" - destination = "---" source = napt_twice_keys[1] + ':' + napt_twice_keys[2] destination = napt_twice_keys[3] + ':' + napt_twice_keys[4] diff --git a/scripts/nbrshow b/scripts/nbrshow index 3014492ed7..b607070150 100644 --- a/scripts/nbrshow +++ b/scripts/nbrshow @@ -80,7 +80,6 @@ class NbrBase(object): ent = self.db.get_all('ASIC_DB', s, blocking=True) br_port_id 
= ent[b"SAI_FDB_ENTRY_ATTR_BRIDGE_PORT_ID"][oid_pfx:] - ent_type = ent[b"SAI_FDB_ENTRY_ATTR_TYPE"] if br_port_id not in self.if_br_oid_map: continue port_id = self.if_br_oid_map[br_port_id] diff --git a/scripts/neighbor_advertiser b/scripts/neighbor_advertiser index d6f280e7c5..cf36febb16 100644 --- a/scripts/neighbor_advertiser +++ b/scripts/neighbor_advertiser @@ -549,7 +549,7 @@ def main(): ferret_service_vips = args.vips operation_mode = args.mode - if operation_mode == 'set' and ferret_service_vips == None: + if operation_mode == 'set' and ferret_service_vips is None: log_warning('ferret service vip is required in set mode') sys.exit(1) diff --git a/scripts/pfcstat b/scripts/pfcstat index 3afcb34f72..106cdda84b 100755 --- a/scripts/pfcstat +++ b/scripts/pfcstat @@ -225,7 +225,6 @@ Examples: """ Print the counters of pfc rx counter """ - cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file_rx): try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_rx, 'r')) @@ -240,7 +239,6 @@ Examples: """ Print the counters of pfc tx counter """ - cnstat_cached_dict = OrderedDict() if os.path.isfile(cnstat_fqn_file_tx): try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file_tx, 'r')) diff --git a/scripts/queuestat b/scripts/queuestat index f6e9d2b466..6b14581afd 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -143,7 +143,6 @@ class Queuestat(object): Print the cnstat. 
""" table = [] - queue_count = len(cnstat_dict) for key, data in cnstat_dict.iteritems(): if key == 'time': @@ -170,7 +169,6 @@ class Queuestat(object): return '{:,}'.format(new - old) table = [] - queue_count = len(cnstat_new_dict) for key, cntr in cnstat_new_dict.iteritems(): if key == 'time': @@ -198,7 +196,6 @@ class Queuestat(object): for port in natsorted(self.counter_port_name_map): cnstat_dict = self.get_cnstat(self.port_queues_map[port]) - cnstat_cached_dict = OrderedDict() cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: @@ -217,7 +214,6 @@ class Queuestat(object): # Get stat for the port queried cnstat_dict = self.get_cnstat(self.port_queues_map[port]) - cnstat_cached_dict = OrderedDict() cnstat_fqn_file_name = cnstat_fqn_file + port if os.path.isfile(cnstat_fqn_file_name): try: diff --git a/scripts/sonic-kdump-config b/scripts/sonic-kdump-config index 640dd5d9b5..5f91979834 100755 --- a/scripts/sonic-kdump-config +++ b/scripts/sonic-kdump-config @@ -49,7 +49,6 @@ def run_command(cmd, use_shell=False): @param use_shell (bool) Execute subprocess with shell access ''' - pid = None try: if isinstance(cmd, list): if use_shell is False: diff --git a/scripts/teamshow b/scripts/teamshow index abb4808753..e38bf5affc 100755 --- a/scripts/teamshow +++ b/scripts/teamshow @@ -50,7 +50,7 @@ class Teamshow(object): Get the portchannel names from database. 
""" team_keys = self.db.keys(self.db.CONFIG_DB, PORT_CHANNEL_CFG_TABLE_PREFIX+"*") - if team_keys == None: + if team_keys is None: return self.teams = [key[len(PORT_CHANNEL_CFG_TABLE_PREFIX):] for key in team_keys] diff --git a/sfputil/main.py b/sfputil/main.py index 654cb8d380..abfadae94e 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -142,8 +142,6 @@ def get_sfp_eeprom_status_string(port, port_sfp_eeprom_status): # logical_port if logical and not ganged # def get_physical_port_name(logical_port, physical_port, ganged): - port_name = None - if logical_port == physical_port: return logical_port elif ganged: @@ -344,7 +342,6 @@ def load_platform_sfputil(): # Load platform module from source platform_path = "/".join([PLATFORM_ROOT_PATH, platform]) - hwsku_path = "/".join([platform_path, hwsku]) try: module_file = "/".join([platform_path, "plugins", PLATFORM_SPECIFIC_MODULE_NAME + ".py"]) diff --git a/show/main.py b/show/main.py index c4c0d4e493..6d2a504585 100755 --- a/show/main.py +++ b/show/main.py @@ -643,10 +643,9 @@ def is_mgmt_vrf_enabled(ctx): cmd = 'sonic-cfggen -d --var-json "MGMT_VRF_CONFIG"' p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - res = p.communicate() + stdout = p.communicate()[0] if p.returncode == 0: - p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - mvrf_dict = json.loads(p.stdout.read()) + mvrf_dict = json.loads(stdout) # if the mgmtVrfEnabled attribute is configured, check the value # and return True accordingly. 
@@ -698,8 +697,6 @@ def address (): config_db = ConfigDBConnector() config_db.connect() - header = ['IFNAME', 'IP Address', 'PrefixLen',] - body = [] # Fetching data from config_db for MGMT_INTERFACE mgmt_ip_data = config_db.get_table('MGMT_INTERFACE') @@ -2166,8 +2163,6 @@ def brief(verbose): vlan_ip_data = config_db.get_table('VLAN_INTERFACE') vlan_ports_data = config_db.get_table('VLAN_MEMBER') - vlan_keys = natsorted(vlan_dhcp_helper_data.keys()) - # Defining dictionaries for DHCP Helper address, Interface Gateway IP, # VLAN ports and port tagging vlan_dhcp_helper_dict = {} @@ -2637,7 +2632,6 @@ def state(redis_unix_socket_path): if redis_unix_socket_path: kwargs['unix_socket_path'] = redis_unix_socket_path - data = {} db = SonicV2Connector(host='127.0.0.1') db.connect(db.STATE_DB, False) # Make one attempt only diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 1ea71cbd81..fb8179c9c6 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -374,7 +374,6 @@ def cli(): @click.argument('url') def install(url, force, skip_migration=False): """ Install image from local binary or URL""" - cleanup_image = False if get_running_image_type() == IMAGE_TYPE_ABOOT: DEFAULT_IMAGE_PATH = ABOOT_DEFAULT_IMAGE_PATH else: @@ -443,7 +442,7 @@ def list(): click.echo("Current: " + curimage) click.echo("Next: " + nextimage) click.echo("Available: ") - for image in get_installed_images(): + for image in images: click.echo(image) # Set default image for boot @@ -518,7 +517,7 @@ def cleanup(): curimage = get_current_image() nextimage = get_next_image() image_removed = 0 - for image in get_installed_images(): + for image in images: if image != curimage and image != nextimage: click.echo("Removing image %s" % image) remove_image(image) @@ -638,7 +637,7 @@ def upgrade_docker(container_name, url, cleanup_image, skip_check, tag, warm): run_command("docker kill %s > /dev/null" % container_name) run_command("docker rm %s " % container_name) - if tag == None: + 
if tag is None: # example image: docker-lldp-sv2:latest tag = get_docker_tag_name(image_latest) run_command("docker tag %s:latest %s:%s" % (image_name, image_name, tag)) From ab90e7d0cbb3a7a3134f14d06140aada4d3a0287 Mon Sep 17 00:00:00 2001 From: lguohan Date: Thu, 9 Apr 2020 11:17:19 -0700 Subject: [PATCH 040/111] [doc]: add ltgm and jenkins badge (#875) Signed-off-by: Guohan Lu --- README.md | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b673e805b4..62503b4fab 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,8 @@ +[![Total alerts](https://img.shields.io/lgtm/alerts/g/Azure/sonic-utilities.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-utilities/alerts/) +[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Azure/sonic-utilities.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Azure/sonic-utilities/context:python) + +[![Build](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-utilities-build/badge/icon)](https://sonic-jenkins.westus2.cloudapp.azure.com/job/common/job/sonic-utilities-build/) + # SONiC: Software for Open Networking in the Cloud ## sonic-utilities @@ -23,21 +28,21 @@ Guide for performing commits: * Use a standard commit message format: > [component/folder touched]: Description intent of your changes -> +> > [List of changes] -> +> > Signed-off-by: Your Name your@email.com - + For example: > swss-common: Stabilize the ConsumerTable -> +> > * Fixing autoreconf > * Fixing unit-tests by adding checkers and initialize the DB before start > * Adding the ability to select from multiple channels -> * Health-Monitor - The idea of the patch is that if something went wrong with the notification channel, +> * Health-Monitor - The idea of the patch is that if something went wrong with the notification channel, > we will have the option to know about it (Query the LLEN table length). 
-> +> > Signed-off-by: John Doe user@dev.null From 386beb8d83b5887fa941980d06e3df7a8f782d87 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Mon, 13 Apr 2020 07:40:12 +0300 Subject: [PATCH 041/111] Add platform options to 'show techsupport' command (#865) Signed-off-by: Shlomi Bitton --- scripts/generate_dump | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 105247e023..7d6ce5f55a 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -199,6 +199,37 @@ save_redis() { save_cmd "sonic-db-dump -n '$db_name' -y" "$db_name.json" } +############################################################################### +# Runs a 'show platform' command, append the output to 'filename' and add to the incrementally built tar. +# Globals: +# LOGDIR +# BASE +# MKDIR +# TAR +# TARFILE +# DUMPDIR +# V +# RM +# Arguments: +# type: the type of platform information +# filename: the filename to save the output as in $BASE/dump +# Returns: +# None +############################################################################### +save_platform() { + local type="$1" + local filename=$2 + local filepath="${LOGDIR}/$filename" + local tarpath="${BASE}/dump/$filename" + [ ! -d $LOGDIR ] && $MKDIR $V -p $LOGDIR + + eval "show platform $type" &>> "$filepath" + echo $'\r' >> "$filepath" + + ($TAR $V -uhf $TARFILE -C $DUMPDIR "$tarpath" \ + || abort "${ERROR_TAR_FAILED}" "tar append operation failed. Aborting to prevent data loss.") +} + ############################################################################### # Runs a comamnd and saves its output to the incrementally built tar. # Globals: @@ -327,17 +358,20 @@ main() { /proc/zoneinfo \ || abort "${ERROR_PROCFS_SAVE_FAILED}" "Proc saving operation failed. Aborting for safety." 
+ save_platform "syseeprom" "platform" + save_platform "psustatus" "platform" + save_platform "ssdhealth" "platform" + save_platform "temperature" "platform" + save_platform "fan" "platform" + save_cmd "show version" "version" save_cmd "show platform summary" "platform.summary" - save_cmd "show platform syseeprom" "platform.syseeprom" save_cmd "cat /host/machine.conf" "machine.conf" save_cmd "docker stats --no-stream" "docker.stats" save_cmd "sensors" "sensors" - save_cmd "show platform psustatus" "platform.psustatus" save_cmd "lspci -vvv -xx" "lspci" save_cmd "lsusb -v" "lsusb" - save_cmd "sysctl -a" "sysctl" save_ip "link" "link" save_ip "addr" "addr" From fa1ed971544dd05be9dbd6f7b9ff52ee6d625cfd Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Tue, 14 Apr 2020 19:44:40 +0300 Subject: [PATCH 042/111] [fwutil]: Update Command-Reference.md. (#876) Signed-off-by: Nazarii Hnydyn --- doc/Command-Reference.md | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index f0e2b4e3e4..5112f8a429 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -77,6 +77,7 @@ * [Platform Component Firmware](#platform-component-firmware) * [Platform Component Firmware show commands](#platform-component-firmware-show-commands) * [Platform Component Firmware config commands](#platform-component-firmware-config-commands) + * [Platform Component Firmware vendor specific behaviour](#platform-component-firmware-vendor-specific-behaviour) * [Platform Specific Commands](#platform-specific-commands) * [PortChannels](#portchannels) * [PortChannel Show commands](#portchannel-show-commands) @@ -4147,6 +4148,45 @@ Supported options: Note: the default option is --image=current (current/next values are taken from `sonic_installer list`) +### Platform Component Firmware vendor specific behaviour + +#### Mellanox + +**CPLD update** + +On Mellanox platforms CPLD update can be done either for 
single or for all components at once. +The second approach is preferred. In this case an aggregated `vme` binary is used and +CPLD component can be specified arbitrarily. + +- Example: +```bash +root@sonic:/home/admin# show platform firmware +Chassis Module Component Version Description +---------------------- -------- ----------- ----------------------- ---------------------------------------- +x86_64-mlnx_msn3800-r0 N/A BIOS 0ACLH004_02.02.007_9600 BIOS - Basic Input/Output System + CPLD1 CPLD000000_REV0400 CPLD - Complex Programmable Logic Device + CPLD2 CPLD000000_REV0300 CPLD - Complex Programmable Logic Device + CPLD3 CPLD000000_REV0300 CPLD - Complex Programmable Logic Device + CPLD4 CPLD000000_REV0100 CPLD - Complex Programmable Logic Device + +root@sonic:/home/admin# BURN_VME="$(pwd)/FUI000091_Burn_SN3800_CPLD000120_REV0600_CPLD000165_REV0400_CPLD000166_REV0300_CPLD000167_REV0100.vme" +root@sonic:/home/admin# REFRESH_VME="$(pwd)/FUI000091_Refresh_SN3800_CPLD000120_REV0600_CPLD000165_REV0400_CPLD000166_REV0300_CPLD000167_REV0100.vme" + +root@sonic:/home/admin# config platform firmware install chassis component CPLD1 fw -y ${BURN_VME} +root@sonic:/home/admin# config platform firmware install chassis component CPLD1 fw -y ${REFRESH_VME} + +root@sonic:/home/admin# show platform firmware +Chassis Module Component Version Description +---------------------- -------- ----------- ----------------------- ---------------------------------------- +x86_64-mlnx_msn3800-r0 N/A BIOS 0ACLH004_02.02.007_9600 BIOS - Basic Input/Output System + CPLD1 CPLD000000_REV0600 CPLD - Complex Programmable Logic Device + CPLD2 CPLD000000_REV0400 CPLD - Complex Programmable Logic Device + CPLD3 CPLD000000_REV0300 CPLD - Complex Programmable Logic Device + CPLD4 CPLD000000_REV0100 CPLD - Complex Programmable Logic Device +``` + +Note: the update will have the same effect if any of CPLD1/CPLD2/CPLD3/CPLD4 is used + Go Back To [Beginning of the document](#) or [Beginning of this 
section](#platform-component-firmware) From 9c547eb95707259d78d689f23e73b4d7c9f6d89a Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Tue, 14 Apr 2020 09:53:56 -0700 Subject: [PATCH 043/111] [config] Implement a process level lock (#857) Changes: 1.) Implement a class, which uses hsetnx for lock. 2.) lock is expired within timeout period or will be released by owner. 3.) After -y prompt, lock is reacquired, because timer could have expired, before user enters yes. Signed-off-by: Praveen Chaudhary pchaudhary@linkedin.com --- config/main.py | 107 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 107 insertions(+) diff --git a/config/main.py b/config/main.py index 1d196826d7..086bda1c38 100755 --- a/config/main.py +++ b/config/main.py @@ -529,7 +529,98 @@ def is_ipaddress(val): return False return True +# class for locking entire config process +class ConfigDbLock(): + def __init__(self): + self.lockName = "LOCK|configDbLock" + self.timeout = 10 + self.pid = os.getpid() + self.client = None + + self._acquireLock() + return + + def _acquireLock(self): + try: + # connect to db + db_kwargs = dict() + configdb = ConfigDBConnector(**db_kwargs) + configdb.connect() + + self.client = configdb.get_redis_client('CONFIG_DB') + # Set lock and expire time. Process may get killed b/w set lock and + # expire call. + if self.client.hsetnx(self.lockName, "PID", self.pid): + self.client.expire(self.lockName, self.timeout) + # if lock exists but expire timer not running, run expire time and + # abort. + elif not self.client.ttl(self.lockName): + click.echo(":::Unable to acquire lock. Resetting timer and aborting:::"); + self.client.expire(self.lockName, self.timeout) + sys.exit(1) + else: + click.echo(":::Unable to acquire lock. 
Aborting:::"); + sys.exit(1) + except Exception as e: + click.echo(":::Exception: {}:::".format(e)) + sys.exit(1) + return + + def reacquireLock(self): + try: + # Try to set lock first + if self.client.hsetnx(self.lockName, "PID", self.pid): + self.client.expire(self.lockName, self.timeout) + # if lock exists, check who owns it + else: + p = self.client.pipeline(True) + # watch, we do not want to work on modified lock + p.watch(self.lockName) + # if current process holding then extend the timer + if p.hget(self.lockName, "PID") == str(self.pid): + self.client.expire(self.lockName, self.timeout) + p.unwatch() + return + else: + # some other process is holding the lock. + click.echo(":::Unable to reacquire lock (lock PID: {}, self.pid: {}):::".\ + format(p.hget(self.lockName, "PID"), self.pid)) + p.unwatch() + sys.exit(1) + except Exception as e: + click.echo(":::Exception: {}:::".format(e)) + sys.exit(1) + return + + def _releaseLock(self): + try: + p = self.client.pipeline(True) + # watch, we do not want to work on modified lock + p.watch(self.lockName) + # if current process holding the lock then release it. + if p.hget(self.lockName, "PID") == str(self.pid): + p.multi() + p.delete(self.lockName) + p.execute() + return + # lock may be None, if timer has expired before releasing lock. + elif not self.lockName: + return + else: + # some other process is holding the lock. 
+ click.echo(":::Unable to release lock (lock PID: {}, self.pid: {}):::".\ + format(p.hget(self.lockName, "PID"), self.pid)) + p.unwatch() + except Exception as e: + click.echo(":::Exception: {}:::".format(e)) + return + + def __del__(self): + self._releaseLock() + return +# end of class configdblock +configdb_lock = ConfigDbLock() # This is our main entrypoint - the main 'config' command @click.group(context_settings=CONTEXT_SETTINGS) def config(): @@ -547,6 +638,8 @@ def config(): @click.argument('filename', default='/etc/sonic/config_db.json', type=click.Path()) def save(filename): """Export current config DB to a file on disk.""" + # reacquire lock after prompt + configdb_lock.reacquireLock() command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) @@ -557,6 +650,9 @@ def load(filename, yes): """Import a previous saved config DB dump file.""" if not yes: click.confirm('Load config from the file %s?' % filename, abort=True) + # reacquire lock after prompt + configdb_lock.reacquireLock() + command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) @@ -568,6 +664,8 @@ def reload(filename, yes, load_sysinfo): """Clear current configuration and import a previous saved config DB dump file.""" if not yes: click.confirm('Clear current config and reload config from the file %s?' 
% filename, abort=True) + # reacquire lock after prompt + configdb_lock.reacquireLock() log_info("'reload' executing...") @@ -617,6 +715,8 @@ def reload(filename, yes, load_sysinfo): @click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True)) def load_mgmt_config(filename): """Reconfigure hostname and mgmt interface based on device description file.""" + # reacquire lock after prompt + configdb_lock.reacquireLock() command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) #FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here @@ -639,7 +739,10 @@ def load_mgmt_config(filename): @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') def load_minigraph(): + """Reconfigure based on minigraph.""" + # reacquire lock after prompt + configdb_lock.reacquireLock() log_info("'load_minigraph' executing...") # get the device type @@ -2304,6 +2407,8 @@ def ztp(): @click.argument('run', required=False, type=click.Choice(["run"])) def run(run): """Restart ZTP of the device.""" + # reacquire lock after prompt + configdb_lock.reacquireLock() command = "ztp run -y" run_command(command, display_cmd=True) @@ -2313,6 +2418,8 @@ def run(run): @click.argument('disable', required=False, type=click.Choice(["disable"])) def disable(disable): """Administratively Disable ZTP.""" + # reacquire lock after prompt + configdb_lock.reacquireLock() command = "ztp disable -y" run_command(command, display_cmd=True) From f39704f381833f2b0173f89ae537130d55edfd36 Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Fri, 17 Apr 2020 03:20:13 -0700 Subject: [PATCH 044/111] [generate_dump] Dump systemd specific information (#602) - dump: dumps the entire state of all the units - blame: provides startup time for each service - plot: generate a svg of the boot order 
Co-authored-by: lguohan --- scripts/generate_dump | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 7d6ce5f55a..2611e39d92 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -358,6 +358,10 @@ main() { /proc/zoneinfo \ || abort "${ERROR_PROCFS_SAVE_FAILED}" "Proc saving operation failed. Aborting for safety." + save_cmd "systemd-analyze blame" "systemd.analyze.blame" + save_cmd "systemd-analyze dump" "systemd.analyze.dump" + save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" + save_platform "syseeprom" "platform" save_platform "psustatus" "platform" save_platform "ssdhealth" "platform" From d59cb4af3464f00c3a612d3dd0ffa046aac82f01 Mon Sep 17 00:00:00 2001 From: lguohan Date: Sat, 18 Apr 2020 00:38:43 -0700 Subject: [PATCH 045/111] Revert "[config] Implement a process level lock (#857)" (#882) This reverts commit 3f651dc0fa15d686d01ba0aa5364f7a435b6fd1c. --- config/main.py | 107 ------------------------------------------------- 1 file changed, 107 deletions(-) diff --git a/config/main.py b/config/main.py index 086bda1c38..1d196826d7 100755 --- a/config/main.py +++ b/config/main.py @@ -529,98 +529,7 @@ def is_ipaddress(val): return False return True -# class for locking entire config process -class ConfigDbLock(): - def __init__(self): - self.lockName = "LOCK|configDbLock" - self.timeout = 10 - self.pid = os.getpid() - self.client = None - - self._acquireLock() - return - - def _acquireLock(self): - try: - # connect to db - db_kwargs = dict() - configdb = ConfigDBConnector(**db_kwargs) - configdb.connect() - - self.client = configdb.get_redis_client('CONFIG_DB') - # Set lock and expire time. Process may get killed b/w set lock and - # expire call. - if self.client.hsetnx(self.lockName, "PID", self.pid): - self.client.expire(self.lockName, self.timeout) - # if lock exists but expire timer not running, run expire time and - # abort. 
- elif not self.client.ttl(self.lockName): - click.echo(":::Unable to acquire lock. Resetting timer and aborting:::"); - self.client.expire(self.lockName, self.timeout) - sys.exit(1) - else: - click.echo(":::Unable to acquire lock. Aborting:::"); - sys.exit(1) - except Exception as e: - click.echo(":::Exception: {}:::".format(e)) - sys.exit(1) - return - - def reacquireLock(self): - try: - # Try to set lock first - if self.client.hsetnx(self.lockName, "PID", self.pid): - self.client.expire(self.lockName, self.timeout) - # if lock exists, check who owns it - else: - p = self.client.pipeline(True) - # watch, we do not want to work on modified lock - p.watch(self.lockName) - # if current process holding then extend the timer - if p.hget(self.lockName, "PID") == str(self.pid): - self.client.expire(self.lockName, self.timeout) - p.unwatch() - return - else: - # some other process is holding the lock. - click.echo(":::Unable to reacquire lock (lock PID: {}, self.pid: {}):::".\ - format(p.hget(self.lockName, "PID"), self.pid)) - p.unwatch() - sys.exit(1) - except Exception as e: - click.echo(":::Exception: {}:::".format(e)) - sys.exit(1) - return - - def _releaseLock(self): - try: - p = self.client.pipeline(True) - # watch, we do not want to work on modified lock - p.watch(self.lockName) - # if current process holding the lock then release it. - if p.hget(self.lockName, "PID") == str(self.pid): - p.multi() - p.delete(self.lockName) - p.execute() - return - # lock may be None, if timer has expired before releasing lock. - elif not self.lockName: - return - else: - # some other process is holding the lock. 
- click.echo(":::Unable to release lock (lock PID: {}, self.pid: {}):::".\ - format(p.hget(self.lockName, "PID"), self.pid)) - p.unwatch() - except Exception as e: - click.echo(":::Exception: {}:::".format(e)) - return - - def __del__(self): - self._releaseLock() - return -# end of class configdblock -configdb_lock = ConfigDbLock() # This is our main entrypoint - the main 'config' command @click.group(context_settings=CONTEXT_SETTINGS) def config(): @@ -638,8 +547,6 @@ def config(): @click.argument('filename', default='/etc/sonic/config_db.json', type=click.Path()) def save(filename): """Export current config DB to a file on disk.""" - # reacquire lock after prompt - configdb_lock.reacquireLock() command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) @@ -650,9 +557,6 @@ def load(filename, yes): """Import a previous saved config DB dump file.""" if not yes: click.confirm('Load config from the file %s?' % filename, abort=True) - # reacquire lock after prompt - configdb_lock.reacquireLock() - command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) @@ -664,8 +568,6 @@ def reload(filename, yes, load_sysinfo): """Clear current configuration and import a previous saved config DB dump file.""" if not yes: click.confirm('Clear current config and reload config from the file %s?' 
% filename, abort=True) - # reacquire lock after prompt - configdb_lock.reacquireLock() log_info("'reload' executing...") @@ -715,8 +617,6 @@ def reload(filename, yes, load_sysinfo): @click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True)) def load_mgmt_config(filename): """Reconfigure hostname and mgmt interface based on device description file.""" - # reacquire lock after prompt - configdb_lock.reacquireLock() command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) #FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here @@ -739,10 +639,7 @@ def load_mgmt_config(filename): @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') def load_minigraph(): - """Reconfigure based on minigraph.""" - # reacquire lock after prompt - configdb_lock.reacquireLock() log_info("'load_minigraph' executing...") # get the device type @@ -2407,8 +2304,6 @@ def ztp(): @click.argument('run', required=False, type=click.Choice(["run"])) def run(run): """Restart ZTP of the device.""" - # reacquire lock after prompt - configdb_lock.reacquireLock() command = "ztp run -y" run_command(command, display_cmd=True) @@ -2418,8 +2313,6 @@ def run(run): @click.argument('disable', required=False, type=click.Choice(["disable"])) def disable(disable): """Administratively Disable ZTP.""" - # reacquire lock after prompt - configdb_lock.reacquireLock() command = "ztp disable -y" run_command(command, display_cmd=True) From c4c14f3d6b4d8ecc350484b8022f5a51a6fdf667 Mon Sep 17 00:00:00 2001 From: Travis Van Duyn Date: Tue, 21 Apr 2020 14:24:18 -0700 Subject: [PATCH 046/111] [show][bgp] Use only 'show ip bgp' as the base and use bgp_frr_v4 file for FRR routing stack (#884) Co-authored-by: Travis Van Duyn --- show/bgp_frr_v4.py | 47 
++++++++++++++++++++++++++++++++++++++++++++++ show/main.py | 18 ++++-------------- 2 files changed, 51 insertions(+), 14 deletions(-) create mode 100644 show/bgp_frr_v4.py diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py new file mode 100644 index 0000000000..73075e5aec --- /dev/null +++ b/show/bgp_frr_v4.py @@ -0,0 +1,47 @@ +import click +from show.main import ip, run_command, get_bgp_summary_extended + + +############################################################################### +# +# 'show ip bgp' cli stanza +# +############################################################################### + + +@ip.group() +def bgp(): + """Show IPv4 BGP (Border Gateway Protocol) information""" + pass + + +# 'summary' subcommand ("show ip bgp summary") +@bgp.command() +def summary(): + """Show summarized information of IPv4 BGP state""" + try: + device_output = run_command('sudo vtysh -c "show ip bgp summary"', return_cmd=True) + get_bgp_summary_extended(device_output) + except Exception: + run_command('sudo vtysh -c "show ip bgp summary"') + + +# 'neighbors' subcommand ("show ip bgp neighbors") +@bgp.command() +@click.argument('ipaddress', required=False) +@click.argument('info_type', type=click.Choice(['routes', 'advertised-routes', 'received-routes']), required=False) +def neighbors(ipaddress, info_type): + """Show IP (IPv4) BGP neighbors""" + + command = 'sudo vtysh -c "show ip bgp neighbor' + + if ipaddress is not None: + command += ' {}'.format(ipaddress) + + # info_type is only valid if ipaddress is specified + if info_type is not None: + command += ' {}'.format(info_type) + + command += '"' + + run_command(command) diff --git a/show/main.py b/show/main.py index 6d2a504585..73e6fb30bd 100755 --- a/show/main.py +++ b/show/main.py @@ -1585,26 +1585,16 @@ def protocol(verbose): # Inserting BGP functionality into cli's show parse-chain. # BGP commands are determined by the routing-stack being elected. 
# -from .bgp_quagga_v4 import bgp -ip.add_command(bgp) - if routing_stack == "quagga": + from .bgp_quagga_v4 import bgp + ip.add_command(bgp) from .bgp_quagga_v6 import bgp ipv6.add_command(bgp) elif routing_stack == "frr": + from .bgp_frr_v4 import bgp + ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - @cli.command() - @click.argument('bgp_args', nargs = -1, required = False) - @click.option('--verbose', is_flag=True, help="Enable verbose output") - def bgp(bgp_args, verbose): - """Show BGP information""" - bgp_cmd = "show bgp" - for arg in bgp_args: - bgp_cmd += " " + str(arg) - cmd = 'sudo vtysh -c "{}"'.format(bgp_cmd) - run_command(cmd, display_cmd=verbose) - # # 'lldp' group ("show lldp ...") From 6c448d7141bd23374a521ae1e61b522620624f3e Mon Sep 17 00:00:00 2001 From: Dong Zhang <41927498+dzhangalibaba@users.noreply.github.com> Date: Wed, 22 Apr 2020 19:46:23 -0700 Subject: [PATCH 047/111] [Vxlan] : adding show vnet/vxlan cmds (#880) * [Vxlan] : adding show vnet/vxlan cmds --- doc/Command-Reference.md | 187 ++++++++++++++++++++++++++ show/main.py | 284 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 471 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 5112f8a429..a85b672cfe 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -104,6 +104,11 @@ * [VLAN Config commands](#vlan-config-commands) * [FDB](#fdb) * [FDB show commands](#fdb-show-commands) +* [VxLAN & Vnet](#vxlan--vnet) + * [VxLAN](#vxlan) + * [VxLAN show commands](#vxlan-show-commands) + * [Vnet](#vnet) + * [Vnet show commands](#vnet-show-commands) * [Warm Reboot](#warm-reboot) * [Warm Restart](#warm-restart) * [Warm Restart show commands](#warm-restart-show-commands) @@ -5608,6 +5613,188 @@ Clear the FDB table Go Back To [Beginning of the document](#) or [Beginning of this section](#vlan--FDB) +## VxLAN & Vnet + +### VxLAN + +#### VxLAN show commands + +**show vxlan tunnel** + +This command displays brief 
information about all the vxlans configured in the device. It displays the vxlan tunnel name, source IP address, destination IP address (if configured), tunnel map name and mapping. + +- Usage: + + ``` + show vxlan tunnel + ``` + +- Example: + + ``` + admin@sonic:~$ show vxlan tunnel + vxlan tunnel name source ip destination ip tunnel map name tunnel map mapping(vni -> vlan) + ------------------- ----------- ---------------- ----------------- --------------------------------- + tunnel1 10.10.10.10 + tunnel2 10.10.10.10 20.10.10.10 tmap1 1234 -> 100 + tunnel3 10.10.10.10 30.10.10.10 tmap2 1235 -> 200 + ``` + +**show vxlan name ** + +This command displays configuration. + +- Usage: + + ``` + show vxlan name + ``` + +- Example: + + ``` + admin@sonic:~$ show vxlan name tunnel3 + vxlan tunnel name source ip destination ip tunnel map name tunnel map mapping(vni -> vlan) + ------------------- ----------- ---------------- ----------------- --------------------------------- + tunnel3 10.10.10.10 30.10.10.10 tmap2 1235 -> 200 + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#vxlan--vnet) + +### Vnet + +#### Vnet show commands + +**show vnet brief** + +This command displays brief information about all the vnets configured in the device. It displays the vnet name, vxlan tunnel name, vni and peer list (if configured). + +- Usage: + + ``` + show vnet brief + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet brief + vnet name vxlan tunnel vni peer list + ----------- -------------- ----- ------------------ + Vnet_2000 tunnel1 2000 + Vnet_3000 tunnel1 3000 Vnet_2000,Vnet4000 + ``` + +**show vnet name ** + +This command displays brief information about configured in the device. 
+ +- Usage: + + ``` + show vnet name + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet name Vnet_3000 + vnet name vxlan tunnel vni peer list + ----------- -------------- ----- ------------------ + Vnet_3000 tunnel1 3000 Vnet_2000,Vnet4000 + ``` + +**show vnet interfaces** + +This command displays vnet interfaces information about all the vnets configured in the device. + +- Usage: + + ``` + show vnet interfaces + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet interfaces + vnet name interfaces + ----------- ------------ + Vnet_2000 Ethernet1 + Vnet_3000 Vlan2000 + ``` + +**show vnet neighbors** + +This command displays vnet neighbor information about all the vnets configured in the device. It displays the vnet name, neighbor IP address, neighbor mac address (if configured) and interface. + +- Usage: + + ``` + show vnet neighbors + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet neighbors + Vnet_2000 neighbor mac_address interfaces + ----------- ----------- ------------- ------------ + 11.11.11.11 Ethernet1 + 11.11.11.12 Ethernet1 + + Vnet_3000 neighbor mac_address interfaces + ----------- ----------- ----------------- ------------ + 20.20.20.20 aa:bb:cc:dd:ee:ff Vlan2000 + ``` + +**show vnet routes all** + +This command displays all routes information about all the vnets configured in the device. + +- Usage: + + ``` + show vnet routes all + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet routes all + vnet name prefix nexthop interface + ----------- -------------- --------- ----------- + Vnet_2000 100.100.3.0/24 Ethernet52 + Vnet_3000 100.100.4.0/24 Vlan2000 + + vnet name prefix endpoint mac address vni + ----------- -------------- ---------- ----------------- ----- + Vnet_2000 100.100.1.1/32 10.10.10.1 + Vnet_3000 100.100.2.1/32 10.10.10.2 00:00:00:00:03:04 + ``` + +**show vnet routes tunnel** + +This command displays tunnel routes information about all the vnets configured in the device. 
+ +- Usage: + + ``` + show vnet routes tunnel + ``` + +- Example: + + ``` + admin@sonic:~$ show vnet routes tunnel + vnet name prefix endpoint mac address vni + ----------- -------------- ---------- ----------------- ----- + Vnet_2000 100.100.1.1/32 10.10.10.1 + Vnet_3000 100.100.2.1/32 10.10.10.2 00:00:00:00:03:04 + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#vxlan--vnet) + ## Warm Reboot warm-reboot command initiates a warm reboot of the device. diff --git a/show/main.py b/show/main.py index 73e6fb30bd..5fd1efd8df 100755 --- a/show/main.py +++ b/show/main.py @@ -2906,5 +2906,289 @@ def autorestart(container_name): body.append([name, container_feature_table[name]['auto_restart']]) click.echo(tabulate(body, header)) +# +# 'vnet' command ("show vnet") +# +@cli.group(cls=AliasedGroup, default_if_no_args=False) +def vnet(): + """Show vnet related information""" + pass + +@vnet.command() +@click.argument('vnet_name', required=True) +def name(vnet_name): + """Show vnet name information""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['vnet name', 'vxlan tunnel', 'vni', 'peer list'] + + # Fetching data from config_db for VNET + vnet_data = config_db.get_entry('VNET', vnet_name) + + def tablelize(vnet_key, vnet_data): + table = [] + if vnet_data: + r = [] + r.append(vnet_key) + r.append(vnet_data.get('vxlan_tunnel')) + r.append(vnet_data.get('vni')) + r.append(vnet_data.get('peer_list')) + table.append(r) + return table + + click.echo(tabulate(tablelize(vnet_name, vnet_data), header)) + +@vnet.command() +def brief(): + """Show vnet brief information""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['vnet name', 'vxlan tunnel', 'vni', 'peer list'] + + # Fetching data from config_db for VNET + vnet_data = config_db.get_table('VNET') + vnet_keys = natsorted(vnet_data.keys()) + + def tablelize(vnet_keys, vnet_data): + table = [] + for k in vnet_keys: + r = [] + r.append(k) + 
r.append(vnet_data[k].get('vxlan_tunnel')) + r.append(vnet_data[k].get('vni')) + r.append(vnet_data[k].get('peer_list')) + table.append(r) + return table + + click.echo(tabulate(tablelize(vnet_keys, vnet_data), header)) + +@vnet.command() +def interfaces(): + """Show vnet interfaces information""" + config_db = ConfigDBConnector() + config_db.connect() + + header = ['vnet name', 'interfaces'] + + # Fetching data from config_db for interfaces + intfs_data = config_db.get_table("INTERFACE") + vlan_intfs_data = config_db.get_table("VLAN_INTERFACE") + + vnet_intfs = {} + for k, v in intfs_data.items(): + if 'vnet_name' in v: + vnet_name = v['vnet_name'] + if vnet_name in vnet_intfs: + vnet_intfs[vnet_name].append(k) + else: + vnet_intfs[vnet_name] = [k] + + for k, v in vlan_intfs_data.items(): + if 'vnet_name' in v: + vnet_name = v['vnet_name'] + if vnet_name in vnet_intfs: + vnet_intfs[vnet_name].append(k) + else: + vnet_intfs[vnet_name] = [k] + + table = [] + for k, v in vnet_intfs.items(): + r = [] + r.append(k) + r.append(",".join(natsorted(v))) + table.append(r) + + click.echo(tabulate(table, header)) + +@vnet.command() +def neighbors(): + """Show vnet neighbors information""" + config_db = ConfigDBConnector() + config_db.connect() + + header = ['', 'neighbor', 'mac_address', 'interfaces'] + + # Fetching data from config_db for interfaces + intfs_data = config_db.get_table("INTERFACE") + vlan_intfs_data = config_db.get_table("VLAN_INTERFACE") + + vnet_intfs = {} + for k, v in intfs_data.items(): + if 'vnet_name' in v: + vnet_name = v['vnet_name'] + if vnet_name in vnet_intfs: + vnet_intfs[vnet_name].append(k) + else: + vnet_intfs[vnet_name] = [k] + + for k, v in vlan_intfs_data.items(): + if 'vnet_name' in v: + vnet_name = v['vnet_name'] + if vnet_name in vnet_intfs: + vnet_intfs[vnet_name].append(k) + else: + vnet_intfs[vnet_name] = [k] + + appl_db = swsssdk.SonicV2Connector() + appl_db.connect(appl_db.APPL_DB) + + # Fetching data from appl_db for neighbors + 
nbrs = appl_db.keys(appl_db.APPL_DB, "NEIGH_TABLE*") + nbrs_data = {} + for nbr in nbrs if nbrs else []: + tbl, intf, ip = nbr.split(":", 2) + mac = appl_db.get(appl_db.APPL_DB, nbr, 'neigh') + if intf in nbrs_data: + nbrs_data[intf].append((ip, mac)) + else: + nbrs_data[intf] = [(ip, mac)] + + table = [] + for k, v in vnet_intfs.items(): + v = natsorted(v) + header[0] = k + table = [] + for intf in v: + if intf in nbrs_data: + for ip, mac in nbrs_data[intf]: + r = ["", ip, mac, intf] + table.append(r) + click.echo(tabulate(table, header)) + click.echo("\n") + +@vnet.group() +def routes(): + """Show vnet routes related information""" + pass + +@routes.command() +def all(): + """Show all vnet routes""" + appl_db = swsssdk.SonicV2Connector() + appl_db.connect(appl_db.APPL_DB) + + header = ['vnet name', 'prefix', 'nexthop', 'interface'] + + # Fetching data from appl_db for VNET ROUTES + vnet_rt_keys = appl_db.keys(appl_db.APPL_DB, "VNET_ROUTE_TABLE*") + vnet_rt_keys = natsorted(vnet_rt_keys) if vnet_rt_keys else [] + + table = [] + for k in vnet_rt_keys: + r = [] + r.extend(k.split(":", 2)[1:]) + val = appl_db.get_all(appl_db.APPL_DB, k) + r.append(val.get('nexthop')) + r.append(val.get('ifname')) + table.append(r) + + click.echo(tabulate(table, header)) + + click.echo("\n") + + header = ['vnet name', 'prefix', 'endpoint', 'mac address', 'vni'] + + # Fetching data from appl_db for VNET TUNNEL ROUTES + vnet_rt_keys = appl_db.keys(appl_db.APPL_DB, "VNET_ROUTE_TUNNEL_TABLE*") + vnet_rt_keys = natsorted(vnet_rt_keys) if vnet_rt_keys else [] + + table = [] + for k in vnet_rt_keys: + r = [] + r.extend(k.split(":", 2)[1:]) + val = appl_db.get_all(appl_db.APPL_DB, k) + r.append(val.get('endpoint')) + r.append(val.get('mac_address')) + r.append(val.get('vni')) + table.append(r) + + click.echo(tabulate(table, header)) + +@routes.command() +def tunnel(): + """Show vnet tunnel routes""" + appl_db = swsssdk.SonicV2Connector() + appl_db.connect(appl_db.APPL_DB) + + header = ['vnet 
name', 'prefix', 'endpoint', 'mac address', 'vni'] + + # Fetching data from appl_db for VNET TUNNEL ROUTES + vnet_rt_keys = appl_db.keys(appl_db.APPL_DB, "VNET_ROUTE_TUNNEL_TABLE*") + vnet_rt_keys = natsorted(vnet_rt_keys) if vnet_rt_keys else [] + + table = [] + for k in vnet_rt_keys: + r = [] + r.extend(k.split(":", 2)[1:]) + val = appl_db.get_all(appl_db.APPL_DB, k) + r.append(val.get('endpoint')) + r.append(val.get('mac_address')) + r.append(val.get('vni')) + table.append(r) + + click.echo(tabulate(table, header)) + +# +# 'vxlan' command ("show vxlan") +# +@cli.group(cls=AliasedGroup, default_if_no_args=False) +def vxlan(): + """Show vxlan related information""" + pass + +@vxlan.command() +@click.argument('vxlan_name', required=True) +def name(vxlan_name): + """Show vxlan name information""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['vxlan tunnel name', 'source ip', 'destination ip', 'tunnel map name', 'tunnel map mapping(vni -> vlan)'] + + # Fetching data from config_db for VXLAN TUNNEL + vxlan_data = config_db.get_entry('VXLAN_TUNNEL', vxlan_name) + + table = [] + if vxlan_data: + r = [] + r.append(vxlan_name) + r.append(vxlan_data.get('src_ip')) + r.append(vxlan_data.get('dst_ip')) + vxlan_map_keys = config_db.keys(config_db.CONFIG_DB, + 'VXLAN_TUNNEL_MAP{}{}{}*'.format(config_db.KEY_SEPARATOR, vxlan_name, config_db.KEY_SEPARATOR)) + if vxlan_map_keys: + vxlan_map_mapping = config_db.get_all(config_db.CONFIG_DB, vxlan_map_keys[0]) + r.append(vxlan_map_keys[0].split(config_db.KEY_SEPARATOR, 2)[2]) + r.append("{} -> {}".format(vxlan_map_mapping.get('vni'), vxlan_map_mapping.get('vlan'))) + table.append(r) + + click.echo(tabulate(table, header)) + +@vxlan.command() +def tunnel(): + """Show vxlan tunnel information""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['vxlan tunnel name', 'source ip', 'destination ip', 'tunnel map name', 'tunnel map mapping(vni -> vlan)'] + + # Fetching data from config_db for VXLAN 
TUNNEL + vxlan_data = config_db.get_table('VXLAN_TUNNEL') + vxlan_keys = natsorted(vxlan_data.keys()) + + table = [] + for k in vxlan_keys: + r = [] + r.append(k) + r.append(vxlan_data[k].get('src_ip')) + r.append(vxlan_data[k].get('dst_ip')) + vxlan_map_keys = config_db.keys(config_db.CONFIG_DB, + 'VXLAN_TUNNEL_MAP{}{}{}*'.format(config_db.KEY_SEPARATOR,k, config_db.KEY_SEPARATOR)) + if vxlan_map_keys: + vxlan_map_mapping = config_db.get_all(config_db.CONFIG_DB, vxlan_map_keys[0]) + r.append(vxlan_map_keys[0].split(config_db.KEY_SEPARATOR, 2)[2]) + r.append("{} -> {}".format(vxlan_map_mapping.get('vni'), vxlan_map_mapping.get('vlan'))) + table.append(r) + + click.echo(tabulate(table, header)) + if __name__ == '__main__': cli() From 8b6cf84bed4da2b7527f6c42ac37c8afc997b2c0 Mon Sep 17 00:00:00 2001 From: Travis Van Duyn Date: Fri, 24 Apr 2020 13:44:50 -0700 Subject: [PATCH 048/111] [show] Add 'ip/ipv6 bgp network' commands (#888) Co-authored-by: Travis Van Duyn --- doc/Command-Reference.md | 67 ++++++++++++++++++++++++++++++++++++++++ show/bgp_frr_v4.py | 31 +++++++++++++++++++ show/bgp_frr_v6.py | 31 +++++++++++++++++++ 3 files changed, 129 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index a85b672cfe..d1245019df 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -1603,6 +1603,38 @@ Optionally, you can specify an IP address in order to display only that particul Click [here](#Quagga-BGP-Show-Commands) to see the example for "show ip bgp neighbors" for Quagga. +**show ip bgp network [[|] [(bestpath | multipath | longer-prefixes | json)]] + +This command displays all the details of IPv4 Border Gateway Protocol (BGP) prefixes. + +- Usage: + + + ``` + show ip bgp network [[|] [(bestpath | multipath | longer-prefixes | json)]] + ``` + +- Example: + + NOTE: The "longer-prefixes" option is only available when a network prefix with a "/" notation is used. 
+ + ``` + admin@sonic:~$ show ip bgp network + + admin@sonic:~$ show ip bgp network 10.1.0.32 bestpath + + admin@sonic:~$ show ip bgp network 10.1.0.32 multipath + + admin@sonic:~$ show ip bgp network 10.1.0.32 json + + admin@sonic:~$ show ip bgp network 10.1.0.32/32 bestpath + + admin@sonic:~$ show ip bgp network 10.1.0.32/32 multipath + + admin@sonic:~$ show ip bgp network 10.1.0.32/32 json + + admin@sonic:~$ show ip bgp network 10.1.0.32/32 longer-prefixes + ``` **show bgp ipv6 summary (Versions >= 201904 using default FRR routing stack)** @@ -1671,6 +1703,41 @@ This command displays all the details of one particular IPv6 Border Gateway Prot Click [here](#Quagga-BGP-Show-Commands) to see the example for "show ip bgp summary" for Quagga. +**show ipv6 bgp network [[<ipv6-address>|<ipv6-prefix>] [(bestpath | multipath | longer-prefixes | json)]]** + +This command displays all the details of IPv6 Border Gateway Protocol (BGP) prefixes. + +- Usage: + + + ``` + show ipv6 bgp network [[<ipv6-address>|<ipv6-prefix>] [(bestpath | multipath | longer-prefixes | json)]] + ``` + +- Example: + + NOTE: The "longer-prefixes" option is only available when a network prefix with a "/" notation is used. 
+ + ``` + admin@sonic:~$ show ipv6 bgp network + + admin@sonic:~$ show ipv6 bgp network fc00::72 bestpath + + admin@sonic:~$ show ipv6 bgp network fc00::72 multipath + + admin@sonic:~$ show ipv6 bgp network fc00::72 json + + admin@sonic:~$ show ipv6 bgp network fc00::72/64 bestpath + + admin@sonic:~$ show ipv6 bgp network fc00::72/64 multipath + + admin@sonic:~$ show ipv6 bgp network fc00::72/64 json + + admin@sonic:~$ show ipv6 bgp network fc00::72/64 longer-prefixes + ``` + + + **show route-map** diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 73075e5aec..ce946846bf 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -45,3 +45,34 @@ def neighbors(ipaddress, info_type): command += '"' run_command(command) + +# 'network' subcommand ("show ip bgp network") +@bgp.command() +@click.argument('ipaddress', metavar='[|]', required=False) +@click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', + type=click.Choice(['bestpath', 'json', 'longer-prefixes', 'multipath']), required=False) +def network(ipaddress, info_type): + """Show IP (IPv4) BGP network""" + + command = 'sudo vtysh -c "show ip bgp' + + if ipaddress is not None: + if '/' in ipaddress: + # For network prefixes then this all info_type(s) are available + pass + else: + # For an ipaddress then check info_type, exit if specified option doesn't work. 
+ if info_type in ['longer-prefixes']: + click.echo('The parameter option: "{}" only available if passing a network prefix'.format(info_type)) + click.echo("EX: 'show ip bgp network 10.0.0.0/24 longer-prefixes'") + raise click.Abort() + + command += ' {}'.format(ipaddress) + + # info_type is only valid if prefix/ipaddress is specified + if info_type is not None: + command += ' {}'.format(info_type) + + command += '"' + + run_command(command) diff --git a/show/bgp_frr_v6.py b/show/bgp_frr_v6.py index 46b20ad2bc..8c3ad70dde 100644 --- a/show/bgp_frr_v6.py +++ b/show/bgp_frr_v6.py @@ -36,3 +36,34 @@ def neighbors(ipaddress, info_type): info_type = "" if info_type is None else info_type command = 'sudo vtysh -c "show bgp ipv6 neighbor {} {}"'.format(ipaddress, info_type) run_command(command) + +# 'network' subcommand ("show ipv6 bgp network") +@bgp.command() +@click.argument('ipaddress', metavar='[|]', required=False) +@click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', + type=click.Choice(['bestpath', 'json', 'longer-prefixes', 'multipath']), required=False) +def network(ipaddress, info_type): + """Show BGP ipv6 network""" + + command = 'sudo vtysh -c "show bgp ipv6' + + if ipaddress is not None: + if '/' in ipaddress: + # For network prefixes then this all info_type(s) are available + pass + else: + # For an ipaddress then check info_type, exit if specified option doesn't work. 
+ if info_type in ['longer-prefixes']: + click.echo('The parameter option: "{}" only available if passing a network prefix'.format(info_type)) + click.echo("EX: 'show ipv6 bgp network fc00:1::/64 longer-prefixes'") + raise click.Abort() + + command += ' {}'.format(ipaddress) + + # info_type is only valid if prefix/ipaddress is specified + if info_type is not None: + command += ' {}'.format(info_type) + + command += '"' + + run_command(command) From 5328c1856d88bb8102dd21ead8d563ae4b6eb190 Mon Sep 17 00:00:00 2001 From: Ying Xie Date: Mon, 27 Apr 2020 08:35:39 -0700 Subject: [PATCH 049/111] [fast reboot] set a fast-reboot DB flag (#887) - This flag is currently no functional impact with just this change. - This flag is intended to coordinate fast-reboot shutdown path. - This flag enables moving fast-reboot per service knowledge to service scripts. which unblocks moving warm-reboot per service knowledge to service scripts. - This flag is consistent with the bootup flag we setting on the boot up path. Signed-off-by: Ying Xie --- scripts/fast-reboot | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index faea099806..893a619617 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -92,7 +92,7 @@ function parseOptions() done } -function clear_fast_boot() +function common_clear() { debug "${REBOOT_TYPE} failure ($?) cleanup ..." @@ -101,9 +101,16 @@ function clear_fast_boot() teardown_control_plane_assistant } +function clear_fast_boot() +{ + common_clear + + redis-cli -n 6 DEL "FAST_REBOOT|system" &>/dev/null || /bin/true +} + function clear_warm_boot() { - clear_fast_boot + common_clear result=`timeout 10s config warm_restart disable; if [[ $? 
== 124 ]]; then echo timeout; else echo "code ($?)"; fi` || /bin/true debug "Cancel warm-reboot: ${result}" @@ -308,6 +315,7 @@ case "$REBOOT_TYPE" in "fast-reboot") BOOT_TYPE_ARG=$REBOOT_TYPE trap clear_fast_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM + redis-cli -n 6 SET "FAST_REBOOT|system" "1" "EX" "180" &>/dev/null ;; "warm-reboot") if [[ "$sonic_asic_type" == "mellanox" ]]; then From 7fb0f35498370bed247cd3b09de611346044d0b5 Mon Sep 17 00:00:00 2001 From: Tamer Ahmed Date: Mon, 27 Apr 2020 20:48:02 -0700 Subject: [PATCH 050/111] [utility] Filter FDB entries (#890) * [utility] Filter FDB entries FDB table can get large due to VM creation/deletion which causes fast reboot to slow down. This utility filters FDB entries based on current MAC entries in the ARP table. Signed-off-by: Tamer Ahmed --- scripts/fast-reboot | 15 +- scripts/filter_fdb_entries.py | 127 + setup.py | 3 +- .../filter_fdb_entries_test.py | 173 + .../filter_fdb_input/__init__.py | 0 .../filter_fdb_input/arp.json | 408 ++ .../filter_fdb_input/expected_fdb.json | 401 ++ .../filter_fdb_input/fdb.json | 3516 +++++++++++++++++ .../filter_fdb_input/test_vectors.py | 69 + 9 files changed, 4709 insertions(+), 3 deletions(-) create mode 100755 scripts/filter_fdb_entries.py create mode 100644 sonic-utilities-tests/filter_fdb_entries_test.py create mode 100644 sonic-utilities-tests/filter_fdb_input/__init__.py create mode 100644 sonic-utilities-tests/filter_fdb_input/arp.json create mode 100644 sonic-utilities-tests/filter_fdb_input/expected_fdb.json create mode 100644 sonic-utilities-tests/filter_fdb_input/fdb.json create mode 100644 sonic-utilities-tests/filter_fdb_input/test_vectors.py diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 893a619617..4313af5d0a 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -29,6 +29,7 @@ EXIT_NEXT_IMAGE_NOT_EXISTS=4 EXIT_ORCHAGENT_SHUTDOWN=10 EXIT_SYNCD_SHUTDOWN=11 EXIT_FAST_REBOOT_DUMP_FAILURE=12 +EXIT_FILTER_FDB_ENTRIES_FAILURE=13 
EXIT_NO_CONTROL_PLANE_ASSISTANT=20 function error() @@ -378,14 +379,24 @@ fi if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then # Dump the ARP and FDB tables to files also as default routes for both IPv4 and IPv6 # into /host/fast-reboot - mkdir -p /host/fast-reboot + DUMP_DIR=/host/fast-reboot + mkdir -p $DUMP_DIR FAST_REBOOT_DUMP_RC=0 - /usr/bin/fast-reboot-dump.py -t /host/fast-reboot || FAST_REBOOT_DUMP_RC=$? + /usr/bin/fast-reboot-dump.py -t $DUMP_DIR || FAST_REBOOT_DUMP_RC=$? if [[ FAST_REBOOT_DUMP_RC -ne 0 ]]; then error "Failed to run fast-reboot-dump.py. Exit code: $FAST_REBOOT_DUMP_RC" unload_kernel exit "${EXIT_FAST_REBOOT_DUMP_FAILURE}" fi + + FILTER_FDB_ENTRIES_RC=0 + # Filter FDB entries using MAC addresses from ARP table + /usr/bin/filter_fdb_entries.py -f $DUMP_DIR/fdb.json -a $DUMP_DIR/arp.json || FILTER_FDB_ENTRIES_RC=$? + if [[ FILTER_FDB_ENTRIES_RC -ne 0 ]]; then + error "Failed to filter FDb entries. Exit code: $FILTER_FDB_ENTRIES_RC" + unload_kernel + exit "${EXIT_FILTER_FDB_ENTRIES_FAILURE}" + fi fi init_warm_reboot_states diff --git a/scripts/filter_fdb_entries.py b/scripts/filter_fdb_entries.py new file mode 100755 index 0000000000..1efe30ebe4 --- /dev/null +++ b/scripts/filter_fdb_entries.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python + +import json +import sys +import os +import argparse +import syslog +import traceback +import time + +from collections import defaultdict + +def get_arp_entries_map(filename): + """ + Generate map for ARP entries + + ARP entry map is using the MAC as a key for the arp entry. The map key is reformated in order + to match FDB table formatting + + Args: + filename(str): ARP entry file name + + Returns: + arp_map(dict) map of ARP entries using MAC as key. 
+ """ + with open(filename, 'r') as fp: + arp_entries = json.load(fp) + + arp_map = defaultdict() + for arp in arp_entries: + for key, config in arp.items(): + if 'NEIGH_TABLE' in key: + arp_map[config["neigh"].replace(':', '-')] = "" + + return arp_map + +def filter_fdb_entries(fdb_filename, arp_filename, backup_file): + """ + Filter FDB entries based on MAC presence into ARP entries + + FDB entries that do not have MAC entry in the ARP table are filtered out. New FDB entries + file will be created if it has fewer entries than original one. + + Args: + fdb_filename(str): FDB entries file name + arp_filename(str): ARP entry file name + backup_file(bool): Create backup copy of FDB file before creating new one + + Returns: + None + """ + arp_map = get_arp_entries_map(arp_filename) + + with open(fdb_filename, 'r') as fp: + fdb_entries = json.load(fp) + + def filter_fdb_entry(fdb_entry): + for key, _ in fdb_entry.items(): + if 'FDB_TABLE' in key: + return key.split(':')[-1] in arp_map + + # malformed entry, default to False so it will be deleted + return False + + new_fdb_entries = list(filter(filter_fdb_entry, fdb_entries)) + + if len(new_fdb_entries) < len(fdb_entries): + if backup_file: + os.rename(fdb_filename, fdb_filename + '-' + time.strftime("%Y%m%d-%H%M%S")) + + with open(fdb_filename, 'w') as fp: + json.dump(new_fdb_entries, fp, indent=2, separators=(',', ': ')) + +def file_exists_or_raise(filename): + """ + Check if file exists on the file system + + Args: + filename(str): File name + + Returns: + None + + Raises: + Exception file does not exist + """ + if not os.path.exists(filename): + raise Exception("file '{0}' does not exist".format(filename)) + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('-f', '--fdb', type=str, default='/tmp/fdb.json', help='fdb file name') + parser.add_argument('-a', '--arp', type=str, default='/tmp/arp.json', help='arp file name') + parser.add_argument('-b', '--backup_file', type=bool, default=True, 
help='Back up old fdb entries file') + args = parser.parse_args() + + fdb_filename = args.fdb + arp_filename = args.arp + backup_file = args.backup_file + + try: + file_exists_or_raise(fdb_filename) + file_exists_or_raise(arp_filename) + except Exception as e: + syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) + else: + filter_fdb_entries(fdb_filename, arp_filename, backup_file) + + return 0 + +if __name__ == '__main__': + res = 0 + try: + syslog.openlog('filter_fdb_entries') + res = main() + except KeyboardInterrupt: + syslog.syslog(syslog.LOG_NOTICE, "SIGINT received. Quitting") + res = 1 + except Exception as e: + syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) + res = 2 + finally: + syslog.closelog() + try: + sys.exit(res) + except SystemExit: + os._exit(res) diff --git a/setup.py b/setup.py index 47bd6e2fb0..f2abbefc38 100644 --- a/setup.py +++ b/setup.py @@ -54,7 +54,7 @@ ], package_data={ 'show': ['aliases.ini'], - 'sonic-utilities-tests': ['acl_input/*', 'mock_tables/*.py', 'mock_tables/*.json'] + 'sonic-utilities-tests': ['acl_input/*', 'mock_tables/*.py', 'mock_tables/*.json', 'filter_fdb_input/*'] }, scripts=[ 'scripts/aclshow', @@ -74,6 +74,7 @@ 'scripts/fast-reboot-dump.py', 'scripts/fdbclear', 'scripts/fdbshow', + 'scripts/filter_fdb_entries.py', 'scripts/generate_dump', 'scripts/intfutil', 'scripts/intfstat', diff --git a/sonic-utilities-tests/filter_fdb_entries_test.py b/sonic-utilities-tests/filter_fdb_entries_test.py new file mode 100644 index 0000000000..22abeb1f28 --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_entries_test.py @@ -0,0 +1,173 @@ +import glob +import json +import os +import pytest +import shutil +import subprocess + +from collections import defaultdict +from filter_fdb_input.test_vectors import filterFdbEntriesTestVector + +class TestFilterFdbEntries(object): + """ + Test Filter FDb entries + """ + ARP_FILENAME = 
"/tmp/arp.json" + FDB_FILENAME = "/tmp/fdb.json" + EXPECTED_FDB_FILENAME = "/tmp/expected_fdb.json" + + def __setUp(self, testData): + """ + Sets up test data + + Builds arp.json and fdb.json input files to /tmp and also build expected fdb entries files int /tmp + + Args: + testData(dist): Current test vector data + + Returns: + None + """ + def create_file_or_raise(data, filename): + """ + Create test data files + + If the data is string, it will be dump to a json filename. + If data is a file, it will be coppied to filename + + Args: + data(str|list): source of test data + filename(str): filename for test data + + Returns: + None + + Raises: + Exception if data type is not supported + """ + if isinstance(data, list): + with open(filename, 'w') as fp: + json.dump(data, fp, indent=2, separators=(',', ': ')) + elif isinstance(data, str): + shutil.copyfile(data, filename) + else: + raise Exception("Unknown test data type: {0}".format(type(test_data))) + + create_file_or_raise(testData["arp"], self.ARP_FILENAME) + create_file_or_raise(testData["fdb"], self.FDB_FILENAME) + create_file_or_raise(testData["expected_fdb"], self.EXPECTED_FDB_FILENAME) + + def __tearDown(self): + """ + Tear down current test case setup + + Args: + None + + Returns: + None + """ + os.remove(self.ARP_FILENAME) + os.remove(self.EXPECTED_FDB_FILENAME) + fdbFiles = glob.glob(self.FDB_FILENAME + '*') + for file in fdbFiles: + os.remove(file) + + def __runCommand(self, cmds): + """ + Runs command 'cmds' on host + + Args: + cmds(list): command to be run on localhost + + Returns: + stdout(str): stdout gathered during command execution + stderr(str): stderr gathered during command execution + returncode(int): command exit code + """ + process = subprocess.Popen( + cmds, + shell=False, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + stdout, stderr = process.communicate() + + return stdout, stderr, process.returncode + + def __getFdbEntriesMap(self, filename): + """ + Generate map for FDB 
entries + + FDB entry map is using the FDB_TABLE:... as a key for the FDB entry. + + Args: + filename(str): FDB entry file name + + Returns: + fdbMap(defaultdict) map of FDB entries using MAC as key. + """ + with open(filename, 'r') as fp: + fdbEntries = json.load(fp) + + fdbMap = defaultdict() + for fdb in fdbEntries: + for key, config in fdb.items(): + if "FDB_TABLE" in key: + fdbMap[key] = fdb + + return fdbMap + + def __verifyOutput(self): + """ + Verifies FDB entries match expected FDB entries + + Args: + None + + Retruns: + isEqual(bool): True if FDB entries match, False otherwise + """ + fdbMap = self.__getFdbEntriesMap(self.FDB_FILENAME) + with open(self.EXPECTED_FDB_FILENAME, 'r') as fp: + expectedFdbEntries = json.load(fp) + + isEqual = len(fdbMap) == len(expectedFdbEntries) + if isEqual: + for expectedFdbEntry in expectedFdbEntries: + fdbEntry = {} + for key, config in expectedFdbEntry.items(): + if "FDB_TABLE" in key: + fdbEntry = fdbMap[key] + + isEqual = len(fdbEntry) == len(expectedFdbEntry) + for key, config in expectedFdbEntry.items(): + isEqual = isEqual and fdbEntry[key] == config + + if not isEqual: + break + + return isEqual + + @pytest.mark.parametrize("testData", filterFdbEntriesTestVector) + def testFilterFdbEntries(self, testData): + """ + Test Filter FDB entries script + + Args: + testData(dict): Map containing ARP entries, FDB entries, and expected FDB entries + """ + try: + self.__setUp(testData) + + stdout, stderr, rc = self.__runCommand([ + "scripts/filter_fdb_entries.py", + "-a", + self.ARP_FILENAME, + "-f", + self.FDB_FILENAME, + ]) + assert rc == 0, "CFilter_fbd_entries.py failed with '{0}'".format(stderr) + assert self.__verifyOutput(), "Test failed for test data: {0}".format(testData) + finally: + self.__tearDown() diff --git a/sonic-utilities-tests/filter_fdb_input/__init__.py b/sonic-utilities-tests/filter_fdb_input/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git 
a/sonic-utilities-tests/filter_fdb_input/arp.json b/sonic-utilities-tests/filter_fdb_input/arp.json new file mode 100644 index 0000000000..884943f6cf --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/arp.json @@ -0,0 +1,408 @@ +[ + { + "NEIGH_TABLE:Vlan1000:192.168.1.85": { + "neigh": "72:06:00:01:03:39", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.131": { + "neigh": "72:06:00:01:01:29", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.83": { + "neigh": "72:06:00:01:03:37", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.216": { + "neigh": "72:06:00:01:04:70", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.59": { + "neigh": "72:06:00:01:03:13", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.144": { + "neigh": "72:06:00:01:01:42", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.95": { + "neigh": "72:06:00:01:00:93", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.184": { + "neigh": "72:06:00:01:01:82", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.22": { + "neigh": "72:06:00:01:00:20", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.12": { + "neigh": "24:8a:07:4c:f5:18", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.14": { + "neigh": "72:06:00:01:02:68", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.226": { + "neigh": "72:06:00:01:04:80", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.160": { + "neigh": "72:06:00:01:04:14", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.121": { + "neigh": "72:06:00:01:01:19", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.220": { + "neigh": 
"72:06:00:01:04:74", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.127": { + "neigh": "72:06:00:01:03:81", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.247": { + "neigh": "72:06:00:01:02:45", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.108": { + "neigh": "72:06:00:01:01:06", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.227": { + "neigh": "72:06:00:01:02:25", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.17": { + "neigh": "72:06:00:01:00:15", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.58": { + "neigh": "72:06:00:01:00:56", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.101": { + "neigh": "72:06:00:01:00:99", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.222": { + "neigh": "72:06:00:01:04:76", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.180": { + "neigh": "72:06:00:01:01:78", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.151": { + "neigh": "72:06:00:01:01:49", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.175": { + "neigh": "72:06:00:01:01:73", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.153": { + "neigh": "72:06:00:01:01:51", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.211": { + "neigh": "72:06:00:01:02:09", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.89": { + "neigh": "72:06:00:01:03:43", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.223": { + "neigh": "72:06:00:01:04:77", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.252": { + "neigh": "72:06:00:01:02:50", + "family": "IPv4" + }, + "OP": "SET" + }, + 
{ + "NEIGH_TABLE:Vlan1000:192.168.1.123": { + "neigh": "72:06:00:01:03:77", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.48": { + "neigh": "72:06:00:01:00:46", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.120": { + "neigh": "72:06:00:01:01:18", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.188": { + "neigh": "72:06:00:01:04:42", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.101": { + "neigh": "72:06:00:01:03:55", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.111": { + "neigh": "72:06:00:01:01:09", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.0.224": { + "neigh": "72:06:00:01:02:22", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.226": { + "neigh": "72:06:00:01:02:24", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.180": { + "neigh": "72:06:00:01:04:34", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.179": { + "neigh": "72:06:00:01:01:77", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.204": { + "neigh": "72:06:00:01:02:02", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.55": { + "neigh": "72:06:00:01:03:09", + "family": "IPv4" + } + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.0.105": { + "neigh": "72:06:00:01:01:03", + "family": "IPv4" + } + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.82": { + "neigh": "72:06:00:01:03:36", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.18": { + "neigh": "72:06:00:01:02:72", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.77": { + "neigh": "72:06:00:01:03:31", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.65": { + "neigh": 
"72:06:00:01:03:19", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.1.236": { + "neigh": "72:06:00:01:04:90", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.244": { + "neigh": "72:06:00:01:04:98", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.111": { + "neigh": "72:06:00:01:03:65", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.143": { + "neigh": "72:06:00:01:01:41", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.26": { + "neigh": "72:06:00:01:02:80", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.98": { + "neigh": "72:06:00:01:00:96", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.1.52": { + "neigh": "72:06:00:01:03:06", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.51": { + "neigh": "72:06:00:01:00:49", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "OP": "SET", + "NEIGH_TABLE:Vlan1000:192.168.0.6": { + "neigh": "72:06:00:01:00:04", + "family": "IPv4" + } + }, + { + "NEIGH_TABLE:Vlan1000:192.168.0.150": { + "neigh": "72:06:00:01:01:48", + "family": "IPv4" + }, + "OP": "SET" + } +] diff --git a/sonic-utilities-tests/filter_fdb_input/expected_fdb.json b/sonic-utilities-tests/filter_fdb_input/expected_fdb.json new file mode 100644 index 0000000000..1993cf24a3 --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/expected_fdb.json @@ -0,0 +1,401 @@ +[ + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-90": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-25": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-73": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-39": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": 
"SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-09": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-36": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-41": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-19": { + "type": "dynamic", + "port": "Ethernet56" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-37": { + "type": "dynamic", + "port": "Ethernet27" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-49": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-77": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-80": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-82": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-09": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-06": { + "type": "dynamic", + "port": "Ethernet32" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-20": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-96": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-34": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-15": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-31": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-93": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-48": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-02": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-65": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-19": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-09": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-72": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-51": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-74": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-98": { + "type": "dynamic", + "port": "Ethernet55" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-80": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-42": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-99": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-18": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-43": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-46": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-76": { + "type": "dynamic", + "port": "Ethernet22" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-81": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-70": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-68": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-78": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-50": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-77": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-55": { + "type": "dynamic", + "port": "Ethernet56" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-49": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-22": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-03": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-77": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-42": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-56": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-24": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-13": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-45": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-29": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-04": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-06": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-14": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + } +] \ No newline at end of file diff --git a/sonic-utilities-tests/filter_fdb_input/fdb.json b/sonic-utilities-tests/filter_fdb_input/fdb.json new file mode 100644 index 0000000000..6110936b33 --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/fdb.json @@ -0,0 +1,3516 @@ +[ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-73": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-96": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-57": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-27": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-48": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-99": { + "type": "dynamic", + "port": "Ethernet11" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-02": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-19": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-65": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-39": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-64": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-31": { + "type": "dynamic", + "port": "Ethernet29" + }, + 
"OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-90": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-25": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-56": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-27": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-10": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-24": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-03": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-26": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-76": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-73": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-03": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-22": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-07": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-08": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-93": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-90": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-79": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-61": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-53": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-00": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-73": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-54": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-06": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-97": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-22": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-01": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-85": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-83": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-39": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-22": { + "type": "dynamic", + "port": "Ethernet28" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-90": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-10": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-53": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-34": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-00-91": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-32": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-20": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-39": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-23": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-82": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-64": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-98": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-64": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-09": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-78": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-63": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-64": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-71": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-05": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-51": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-36": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-41": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-15": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-87": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-85": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-41": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-30": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-95": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-93": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-66": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-47": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-57": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-37": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-36": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-94": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-22": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-19": { + "type": "dynamic", + "port": "Ethernet56" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-37": { + "type": "dynamic", + "port": "Ethernet27" + } + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-02-78": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-95": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-23": { + "type": "dynamic", + "port": "Ethernet15" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-92": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-35": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-83": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-96": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-84": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-62": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-67": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-86": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-88": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-89": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-88": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-79": { + "type": "dynamic", + "port": "Ethernet7" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-59": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-55": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-02-89": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-51": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-93": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-00": { + "type": "dynamic", + "port": "Ethernet53" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-39": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-28": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-62": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-49": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-38": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-77": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-25": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-88": { + "type": "dynamic", + "port": "Ethernet22" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-07": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-16": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-67": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-80": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-05": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-02": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-33": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-21": { + "type": "dynamic", + "port": "Ethernet38" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-59": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-54": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-75": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-81": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-49": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-29": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-08": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-82": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-41": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-25": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-21": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-02": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-81": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-78": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-09": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-06": { + "type": "dynamic", + "port": "Ethernet32" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-64": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-46": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-60": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-66": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-44": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-17": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-58": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-13": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-30": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-28": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-52": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-20": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-55": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-33": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-72": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-47": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-52": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-91": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-43": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-37": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-54": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-93": { + "type": "dynamic", + "port": "Ethernet27" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-61": { + "type": "dynamic", + "port": "Ethernet54" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-91": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-63": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-73": { + "type": "dynamic", + "port": "Ethernet27" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-96": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-45": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-60": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-34": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-60": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-12": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-02-63": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-31": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-59": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-68": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-98": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-45": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-24": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-15": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-16": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-82": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-03": { + "type": "dynamic", + "port": "Ethernet15" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-61": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-94": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-84": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-11": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-97": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-45": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-34": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-50": { + "type": "dynamic", + "port": "Ethernet28" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-30": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-44": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-31": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-51": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-93": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-48": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-70": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-75": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-46": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-02": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-35": { + "type": "dynamic", + "port": "Ethernet52" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-26": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-65": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-45": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-62": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-04": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-91": { + "type": "dynamic", + "port": "Ethernet56" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-37": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-10": { + "type": "dynamic", + "port": "Ethernet28" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-83": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-75": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-86": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-71": { + "type": "dynamic", + "port": "Ethernet52" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-21": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-19": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-09": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-08": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-69": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-71": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-86": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-02": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-20": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-69": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-84": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-71": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-79": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:24-8A-07-4C-F5-18": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-33": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-41": { + "type": "dynamic", + "port": "Ethernet42" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-39": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-01": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-52": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-72": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-27": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-69": { + "type": "dynamic", + "port": "Ethernet9" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-40": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-38": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-87": { + "type": "dynamic", + "port": "Ethernet29" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-53": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-87": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-10": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-59": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-96": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-61": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-51": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-00": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-07": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-79": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-56": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-84": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-68": { + "type": "dynamic", + "port": "Ethernet22" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-38": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-54": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-21": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-56": { + "type": "dynamic", + "port": "Ethernet12" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-96": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-74": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-09": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-98": { + "type": "dynamic", + "port": "Ethernet55" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-33": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-98": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-48": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-80": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-82": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-17": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-62": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-34": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-80": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-50": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-69": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-42": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-72": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-52": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-00-52": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-99": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-73": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-65": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-63": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-83": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-32": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-35": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-58": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-13": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-76": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-77": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-25": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-51": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-76": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-08": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-43": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-00-16": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-11": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-55": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-11": { + "type": "dynamic", + "port": "Ethernet40" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-86": { + "type": "dynamic", + "port": "Ethernet48" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-19": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-15": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-63": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-04": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-11": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-18": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-43": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-94": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-46": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-69": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-72": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-99": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-53": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-18": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-20": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-23": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-37": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-89": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-76": { + "type": "dynamic", + "port": "Ethernet22" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-26": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-12": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-34": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-82": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-67": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-92": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-07": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-84": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-60": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-68": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-08": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-78": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-81": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-35": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-36": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-97": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-58": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-44": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-80": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-81": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-60": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-95": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-38": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-17": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-74": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-07": { + "type": "dynamic", + "port": "Ethernet15" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-98": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-50": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-81": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-29": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-15": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-75": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-14": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-54": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-70": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-85": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-58": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-68": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-21": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-26": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-49": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-74": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-09": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-89": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-04-28": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-78": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-23": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-24": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-31": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-14": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-94": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-48": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-95": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-40": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-86": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-47": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-99": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-00": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-66": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-18": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-50": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-00-47": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-03": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-49": { + "type": "dynamic", + "port": "Ethernet58" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-30": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-27": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-65": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-99": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-26": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-43": { + "type": "dynamic", + "port": "Ethernet15" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-67": { + "type": "dynamic", + "port": "Ethernet29" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-79": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-48": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-90": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-05": { + "type": "dynamic", + "port": "Ethernet42" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-32": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-77": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-94": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "OP": "SET", + 
"FDB_TABLE:Vlan1000:72-06-00-01-03-55": { + "type": "dynamic", + "port": "Ethernet56" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-13": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-32": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-57": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-27": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-74": { + "type": "dynamic", + "port": "Ethernet28" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-18": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-01": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-05": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-92": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-40": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-29": { + "type": "dynamic", + "port": "Ethernet54" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-42": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-44": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-44": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-06": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-46": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-91": { + 
"type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-38": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-18": { + "type": "dynamic", + "port": "Ethernet39" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-61": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-11": { + "type": "dynamic", + "port": "Ethernet25" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-49": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-22": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-43": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-12": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-12": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-03": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-56": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-28": { + "type": "dynamic", + "port": "Ethernet26" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-00": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-77": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-77": { + "type": "dynamic", + "port": "Ethernet58" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-83": { + "type": "dynamic", + "port": "Ethernet29" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-47": { + "type": 
"dynamic", + "port": "Ethernet29" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-06": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-33": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-74": { + "type": "dynamic", + "port": "Ethernet55" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-13": { + "type": "dynamic", + "port": "Ethernet13" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-17": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-68": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-42": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-28": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-88": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-04": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-90": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-76": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-10": { + "type": "dynamic", + "port": "Ethernet39" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-36": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-92": { + "type": "dynamic", + "port": "Ethernet12" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-56": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-24": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": 
"SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-57": { + "type": "dynamic", + "port": "Ethernet27" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-97": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-16": { + "type": "dynamic", + "port": "Ethernet8" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-30": { + "type": "dynamic", + "port": "Ethernet28" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-53": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-88": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-85": { + "type": "dynamic", + "port": "Ethernet27" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-71": { + "type": "dynamic", + "port": "Ethernet36" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-01": { + "type": "dynamic", + "port": "Ethernet9" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:00-06-07-08-09-0A": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-87": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-58": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-85": { + "type": "dynamic", + "port": "Ethernet58" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-13": { + "type": "dynamic", + "port": "Ethernet42" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-45": { + "type": "dynamic", + "port": "Ethernet54" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-29": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-14": { + "type": "dynamic", + "port": "Ethernet28" + } + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-02-20": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-23": { + "type": "dynamic", + "port": "Ethernet52" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-14": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-04": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-75": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-70": { + "type": "dynamic", + "port": "Ethernet24" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-95": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-70": { + "type": "dynamic", + "port": "Ethernet48" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-62": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-29": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-41": { + "type": "dynamic", + "port": "Ethernet31" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-06": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-36": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-55": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-00-65": { + "type": "dynamic", + "port": "Ethernet54" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-50": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-01-80": { + "type": "dynamic", + "port": "Ethernet8" + } + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-00-92": { + "type": "dynamic", + "port": "Ethernet37" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-14": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-72": { + "type": "dynamic", + "port": "Ethernet12" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-35": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-40": { + "type": "dynamic", + "port": "Ethernet57" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-70": { + "type": "dynamic", + "port": "Ethernet32" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-02-12": { + "type": "dynamic", + "port": "Ethernet57" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-67": { + "type": "dynamic", + "port": "Ethernet56" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-19": { + "type": "dynamic", + "port": "Ethernet11" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-24": { + "type": "dynamic", + "port": "Ethernet53" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-01": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-87": { + "type": "dynamic", + "port": "Ethernet15" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-05": { + "type": "dynamic", + "port": "Ethernet13" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-04": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-03-42": { + "type": "dynamic", + "port": "Ethernet32" + } + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-59": { + "type": "dynamic", + "port": "Ethernet29" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-32": { + "type": "dynamic", + "port": "Ethernet30" + }, + "OP": "SET" + }, + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-89": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-31": { + "type": "dynamic", + "port": "Ethernet40" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-42": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-40": { + "type": "dynamic", + "port": "Ethernet41" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-15": { + "type": "dynamic", + "port": "Ethernet7" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-66": { + "type": "dynamic", + "port": "Ethernet6" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-04-17": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-46": { + "type": "dynamic", + "port": "Ethernet10" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-02-97": { + "type": "dynamic", + "port": "Ethernet23" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-00-57": { + "type": "dynamic", + "port": "Ethernet38" + }, + "OP": "SET" + }, + { + "FDB_TABLE:Vlan1000:72-06-00-01-03-66": { + "type": "dynamic", + "port": "Ethernet14" + }, + "OP": "SET" + }, + { + "OP": "SET", + "FDB_TABLE:Vlan1000:72-06-00-01-04-25": { + "type": "dynamic", + "port": "Ethernet54" + } + } +] \ No newline at end of file diff --git a/sonic-utilities-tests/filter_fdb_input/test_vectors.py b/sonic-utilities-tests/filter_fdb_input/test_vectors.py new file mode 100644 index 0000000000..55d6c136de --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/test_vectors.py @@ -0,0 +1,69 @@ +""" + Filter FDB entries test vector +""" +filterFdbEntriesTestVector = [ + { + "arp":[ + ], + "fdb": [ + ], + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:00:08", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + 
"FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "expected_fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + }, + { + "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", + "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "expected_fdb": "sonic-utilities-tests/filter_fdb_input/expected_fdb.json" + }, +] From 167e993fb6ae5c39c21d11b82050abf335bbcd8a Mon Sep 17 00:00:00 2001 From: rajendra-dendukuri <47423477+rajendra-dendukuri@users.noreply.github.com> Date: Wed, 29 Apr 2020 03:11:34 -0400 Subject: [PATCH 051/111] Allow show ztp to display non-sensitive information visible to non-root user (#872) Removed user privileges checks. These changes fix #800. The ZTP PR Azure/sonic-ztp#13 is also required for these changes to work as expected. 
``` admin@sonic:~$ show ztp status ZTP Admin Mode : True ZTP Service : Processing ZTP Status : IN-PROGRESS ZTP Source : local-fs (/host/ztp/ztp_data_local.json) Runtime : 06m 26s Timestamp : 2020-04-08 13:08:22 UTC ZTP Service is active 01-test-plugin: IN-PROGRESS ``` Signed-off-by: Rajendra Dendukuri --- show/main.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/show/main.py b/show/main.py index 5fd1efd8df..9372eb2fad 100755 --- a/show/main.py +++ b/show/main.py @@ -2848,9 +2848,6 @@ def ztp(status, verbose): if os.path.isfile('/usr/bin/ztp') is False: exit("ZTP feature unavailable in this image version") - if os.geteuid() != 0: - exit("Root privileges are required for this operation") - cmd = "ztp status" if verbose: cmd = cmd + " --verbose" From a68b87cbe07fb64423620b34c955a8ae7fde169c Mon Sep 17 00:00:00 2001 From: rajendra-dendukuri <47423477+rajendra-dendukuri@users.noreply.github.com> Date: Wed, 29 Apr 2020 03:12:21 -0400 Subject: [PATCH 052/111] [doc]: ZTP configuration and show commands (#866) --- doc/Command-Reference.md | 152 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index d1245019df..612158d8b1 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -121,6 +121,9 @@ * [Troubleshooting Commands](#troubleshooting-commands) * [Routing Stack](#routing-stack) * [Quagga BGP Show Commands](#Quagga-BGP-Show-Commands) +* [ZTP Configuration And Show Commands](#ztp-configuration-and-show-commands) + * [ ZTP show commands](#ztp-show-commands) + * [ZTP configuration commands](#ztp-configuration-commands) ## Document History @@ -6573,3 +6576,152 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#quagga-bgp-show-commands) + +# ZTP Configuration And Show Commands + +This section explains all the Zero Touch Provisioning commands 
that are supported in SONiC. + +## ZTP show commands + + +This command displays the current ZTP configuration of the switch. It also displays detailed information about current state of a ZTP session. It displays information related to all configuration sections as defined in the switch provisioning information discovered in a particular ZTP session. + +- Usage: + show ztp status + + show ztp status --verbose + +- Example: + +``` +root@B1-SP1-7712:/home/admin# show ztp status +ZTP Admin Mode : True +ZTP Service : Inactive +ZTP Status : SUCCESS +ZTP Source : dhcp-opt67 (eth0) +Runtime : 05m 31s +Timestamp : 2019-09-11 19:12:24 UTC + +ZTP Service is not running + +01-configdb-json: SUCCESS +02-connectivity-check: SUCCESS +``` +Use the verbose option to display more detailed information. + +``` +root@B1-SP1-7712:/home/admin# show ztp status --verbose +Command: ztp status --verbose +======================================== +ZTP +======================================== +ZTP Admin Mode : True +ZTP Service : Inactive +ZTP Status : SUCCESS +ZTP Source : dhcp-opt67 (eth0) +Runtime : 05m 31s +Timestamp : 2019-09-11 19:12:16 UTC +ZTP JSON Version : 1.0 + +ZTP Service is not running + +---------------------------------------- +01-configdb-json +---------------------------------------- +Status : SUCCESS +Runtime : 02m 48s +Timestamp : 2019-09-11 19:11:55 UTC +Exit Code : 0 +Ignore Result : False + +---------------------------------------- +02-connectivity-check +---------------------------------------- +Status : SUCCESS +Runtime : 04s +Timestamp : 2019-09-11 19:12:16 UTC +Exit Code : 0 +Ignore Result : False +``` + +- Description + + - **ZTP Admin Mode** - Displays if the ZTP feature is administratively enabled or disabled. Possible values are True or False. This value is configurable using "config ztp enabled" and "config ztp disable" commands. + - **ZTP Service** - Displays the ZTP service status. 
The following are possible values this field can display: + - *Active Discovery*: ZTP service is operational and is performing DHCP discovery to learn switch provisioning information + - *Processing*: ZTP service has discovered switch provisioning information and is processing it + - **ZTP Status** - Displays the current state and result of ZTP session. The following are possible values this field can display: + - *IN-PROGRESS*: ZTP session is currently in progress. ZTP service is processing switch provisioning information. + - *SUCCESS*: ZTP service has successfully processed the switch provisioning information. + - *FAILED*: ZTP service has failed to process the switch provisioning information. + - *Not Started*: ZTP service has not started processing the discovered switch provisioning information. + - **ZTP Source** - Displays the DHCP option and then interface name from which switch provisioning information has been discovered. + - **Runtime** - Displays the time taken for ZTP process to complete from start to finish. For individual configuration sections it indicates the time taken to process the associated configuration section. + - **Timestamp** - Displays the date/time stamp when the status field has last changed. + - **ZTP JSON Version** - Version of ZTP JSON file used for describing switch provisioning information. + - **Status** - Displays the current state and result of a configuration section. The following are possible values this field can display: + - *IN-PROGRESS*: Corresponding configuration section is currently being processed. + - *SUCCESS*: Corresponding configuration section was processed successfully. + - *FAILED*: Corresponding configuration section failed to execute successfully. + - *Not Started*: ZTP service has not started processing the corresponding configuration section. + - *DISABLED*: Corresponding configuration section has been marked as disabled and will not be processed. 
+ - **Exit Code** - Displays the program exit code of the configuration section executed. Non-zero exit code indicates that the configuration section has failed to execute successfully. + - **Ignore Result** - If this value is True, the result of the corresponding configuration section is ignored and not used to evaluate the overall ZTP result. + - **Activity String** - In addition to above information an activity string is displayed indicating the current action being performed by the ZTP service and how much time it has been performing the mentioned activity. Below is an example. + - (04m 12s) Discovering provisioning data + +## ZTP configuration commands + +This sub-section explains the list of the configuration options available for ZTP. + + + +**config ztp enable** + +Use this command to enable ZTP administrative mode + +- Example: + +``` +root@sonic:/home/admin# config ztp enable +Running command: ztp enable +``` + + + +**config ztp disable** + +Use this command to disable ZTP administrative mode. This command can also be used to abort a current ZTP session and load the factory default switch configuration. + +- Usage: + config ztp disable + + config ztp disable -y + +- Example: + +``` +root@sonic:/home/admin# config ztp disable +Active ZTP session will be stopped and disabled, continue? [y/N]: y +Running command: ztp disable -y +``` + + +**config ztp run** + +Use this command to manually restart a new ZTP session. This command deletes the existing */etc/sonic/config_db.json* file and stats ZTP service. It also erases the previous ZTP session data. ZTP configuration is loaded on to the switch and ZTP discovery is performed. + +- Usage: + config ztp run + + config ztp run -y + +- Example: + +``` +root@sonic:/home/admin# config ztp run +ZTP will be restarted. You may lose switch data and connectivity, continue? 
[y/N]: y +Running command: ztp run -y +``` + +Go Back To [Beginning of the document](#SONiC-COMMAND-LINE-INTERFACE-GUIDE) or [Beginning of this section](#ztp-configuration-and-show-commands) From e89d2300bab3e61066d812f2c29ed86bff2c8ffa Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Wed, 29 Apr 2020 20:21:55 +0300 Subject: [PATCH 053/111] [fwutil]: Fix firmware update command. (#895) Signed-off-by: Nazarii Hnydyn --- fwutil/main.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fwutil/main.py b/fwutil/main.py index 7f03e54ab0..c320929cc5 100755 --- a/fwutil/main.py +++ b/fwutil/main.py @@ -227,8 +227,6 @@ def update(ctx, yes, force, image): squashfs = None try: - cup = ComponentUpdateProvider() - if image == IMAGE_NEXT: squashfs = SquashFs() @@ -237,6 +235,9 @@ def update(ctx, yes, force, image): cup = ComponentUpdateProvider(fs_path) else: log_helper.print_warning("Next boot is set to current: fallback to defaults") + cup = ComponentUpdateProvider() + else: + cup = ComponentUpdateProvider() click.echo(cup.get_status(force)) From db21d2be5bab3247d8f207ec02286353f8ba6268 Mon Sep 17 00:00:00 2001 From: Qi Luo Date: Wed, 29 Apr 2020 10:50:31 -0700 Subject: [PATCH 054/111] Make `config` command support abbreviation. (#893) --- config/main.py | 134 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 92 insertions(+), 42 deletions(-) diff --git a/config/main.py b/config/main.py index 1d196826d7..e6a459f0e1 100755 --- a/config/main.py +++ b/config/main.py @@ -15,6 +15,7 @@ from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector from minigraph import parse_device_desc_xml +from click_default_group import DefaultGroup import aaa import mlnx @@ -59,6 +60,53 @@ def log_error(msg): syslog.syslog(syslog.LOG_ERR, msg) syslog.closelog() + +# This aliased group has been modified from click examples to inherit from DefaultGroup instead of click.Group. 
+# DefaultGroup is a superclass of click.Group which calls a default subcommand instead of showing +# a help message if no subcommand is passed +class AbbreviationGroup(DefaultGroup): + """This subclass of a DefaultGroup supports looking up aliases in a config + file and with a bit of magic. + """ + + def get_command(self, ctx, cmd_name): + # Try to get builtin commands as normal + rv = click.Group.get_command(self, ctx, cmd_name) + if rv is not None: + return rv + + # Allow automatic abbreviation of the command. "status" for + # instance will match "st". We only allow that however if + # there is only one command. + # If there are multiple matches and the shortest one is the common prefix of all the matches, return + # the shortest one + matches = [] + shortest = None + for x in self.list_commands(ctx): + if x.lower().startswith(cmd_name.lower()): + matches.append(x) + if not shortest: + shortest = x + elif len(shortest) > len(x): + shortest = x + + if not matches: + # No command name matched. Issue Default command. 
+ ctx.arg0 = cmd_name + cmd_name = self.default_cmd_name + return DefaultGroup.get_command(self, ctx, cmd_name) + elif len(matches) == 1: + return DefaultGroup.get_command(self, ctx, matches[0]) + else: + for x in matches: + if not x.startswith(shortest): + break + else: + return DefaultGroup.get_command(self, ctx, shortest) + + ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) + + # # Load asic_type for further use # @@ -531,11 +579,13 @@ def is_ipaddress(val): # This is our main entrypoint - the main 'config' command -@click.group(context_settings=CONTEXT_SETTINGS) +@click.group(cls=AbbreviationGroup, context_settings=CONTEXT_SETTINGS) def config(): """SONiC command line - 'config' command""" if os.geteuid() != 0: exit("Root privileges are required for this operation") + + config.add_command(aaa.aaa) config.add_command(aaa.tacacs) # === Add NAT Configuration ========== @@ -708,7 +758,7 @@ def hostname(new_hostname): # # 'portchannel' group ('config portchannel ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def portchannel(ctx): config_db = ConfigDBConnector() @@ -739,7 +789,7 @@ def remove_portchannel(ctx, portchannel_name): db = ctx.obj['db'] db.set_entry('PORTCHANNEL', portchannel_name, None) -@portchannel.group('member') +@portchannel.group(cls=AbbreviationGroup, name='member') @click.pass_context def portchannel_member(ctx): pass @@ -768,7 +818,7 @@ def del_portchannel_member(ctx, portchannel_name, port_name): # # 'mirror_session' group ('config mirror_session ...') # -@config.group('mirror_session') +@config.group(cls=AbbreviationGroup, name='mirror_session') def mirror_session(): pass @@ -819,7 +869,7 @@ def remove(session_name): # # 'pfcwd' group ('config pfcwd ...') # -@config.group() +@config.group(cls=AbbreviationGroup) def pfcwd(): """Configure pfc watchdog """ pass @@ -905,7 +955,7 @@ def start_default(verbose): # # 'qos' group ('config qos ...') # -@config.group() +@config.group(cls=AbbreviationGroup) 
@click.pass_context def qos(ctx): pass @@ -943,7 +993,7 @@ def reload(): # # 'warm_restart' group ('config warm_restart ...') # -@config.group('warm_restart') +@config.group(cls=AbbreviationGroup, name='warm_restart') @click.pass_context @click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') def warm_restart(ctx, redis_unix_socket_path): @@ -1018,7 +1068,7 @@ def warm_restart_bgp_eoiu(ctx, enable): # # 'vlan' group ('config vlan ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context @click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') def vlan(ctx, redis_unix_socket_path): @@ -1057,7 +1107,7 @@ def del_vlan(ctx, vid): # # 'member' group ('config vlan member ...') # -@vlan.group('member') +@vlan.group(cls=AbbreviationGroup, name='member') @click.pass_context def vlan_member(ctx): pass @@ -1172,7 +1222,7 @@ def vrf_delete_management_vrf(config_db): config_db.mod_entry('MGMT_VRF_CONFIG',"vrf_global",{"mgmtVrfEnabled": "false"}) mvrf_restart_services() -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def snmpagentaddress(ctx): """SNMP agent listening IP address, port, vrf configuration""" @@ -1221,7 +1271,7 @@ def del_snmp_agent_address(ctx, agentip, port, vrf): cmd="systemctl restart snmp" os.system (cmd) -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def snmptrap(ctx): """SNMP Trap server configuration to send traps""" @@ -1268,7 +1318,7 @@ def delete_snmptrap_server(ctx, ver): cmd="systemctl restart snmp" os.system (cmd) -@vlan.group('dhcp_relay') +@vlan.group(cls=AbbreviationGroup, name='dhcp_relay') @click.pass_context def vlan_dhcp_relay(ctx): pass @@ -1337,7 +1387,7 @@ def del_vlan_dhcp_relay_destination(ctx, vid, dhcp_relay_destination_ip): # 'bgp' group ('config bgp ...') # -@config.group() +@config.group(cls=AbbreviationGroup) def bgp(): """BGP-related configuration tasks""" pass @@ -1346,12 +1396,12 
@@ def bgp(): # 'shutdown' subgroup ('config bgp shutdown ...') # -@bgp.group() +@bgp.group(cls=AbbreviationGroup) def shutdown(): """Shut down BGP session(s)""" pass -@config.group() +@config.group(cls=AbbreviationGroup) def kdump(): """ Configure kdump """ if os.geteuid() != 0: @@ -1412,7 +1462,7 @@ def neighbor(ipaddr_or_hostname, verbose): """Shut down BGP session by neighbor IP address or hostname""" _change_bgp_session_status(ipaddr_or_hostname, 'down', verbose) -@bgp.group() +@bgp.group(cls=AbbreviationGroup) def startup(): """Start up BGP session(s)""" pass @@ -1438,7 +1488,7 @@ def neighbor(ipaddr_or_hostname, verbose): # 'remove' subgroup ('config bgp remove ...') # -@bgp.group() +@bgp.group(cls=AbbreviationGroup) def remove(): "Remove BGP neighbor configuration from the device" pass @@ -1453,7 +1503,7 @@ def remove_neighbor(neighbor_ip_or_hostname): # 'interface' group ('config interface ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def interface(ctx): """Interface-related configuration tasks""" @@ -1587,7 +1637,7 @@ def mtu(ctx, interface_name, interface_mtu, verbose): # 'ip' subgroup ('config interface ip ...') # -@interface.group() +@interface.group(cls=AbbreviationGroup) @click.pass_context def ip(ctx): """Add or remove IP address""" @@ -1693,7 +1743,7 @@ def remove(ctx, interface_name, ip_addr): # -@interface.group() +@interface.group(cls=AbbreviationGroup) @click.pass_context def vrf(ctx): """Bind or unbind VRF""" @@ -1764,7 +1814,7 @@ def unbind(ctx, interface_name): # 'vrf' group ('config vrf ...') # -@config.group('vrf') +@config.group(cls=AbbreviationGroup, name='vrf') @click.pass_context def vrf(ctx): """VRF-related configuration tasks""" @@ -1809,7 +1859,7 @@ def del_vrf(ctx, vrf_name): # 'route' group ('config route ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def route(ctx): """route-related configuration tasks""" @@ -1923,7 +1973,7 @@ def del_route(ctx, 
command_str): # 'acl' group ('config acl ...') # -@config.group() +@config.group(cls=AbbreviationGroup) def acl(): """ACL-related configuration tasks""" pass @@ -1932,7 +1982,7 @@ def acl(): # 'add' subgroup ('config acl add ...') # -@acl.group() +@acl.group(cls=AbbreviationGroup) def add(): """ Add ACL configuration. @@ -1996,7 +2046,7 @@ def table(table_name, table_type, description, ports, stage): # 'remove' subgroup ('config acl remove ...') # -@acl.group() +@acl.group(cls=AbbreviationGroup) def remove(): """ Remove ACL configuration. @@ -2022,7 +2072,7 @@ def table(table_name): # 'acl update' group # -@acl.group() +@acl.group(cls=AbbreviationGroup) def update(): """ACL-related configuration tasks""" pass @@ -2056,7 +2106,7 @@ def incremental(file_name): # 'dropcounters' group ('config dropcounters ...') # -@config.group() +@config.group(cls=AbbreviationGroup) def dropcounters(): """Drop counter related configuration tasks""" pass @@ -2153,7 +2203,7 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, verbose): # 'pfc' group ('config interface pfc ...') # -@interface.group() +@interface.group(cls=AbbreviationGroup) @click.pass_context def pfc(ctx): """Set PFC configuration.""" @@ -2199,7 +2249,7 @@ def priority(ctx, interface_name, priority, status): # 'platform' group ('config platform ...') # -@config.group() +@config.group(cls=AbbreviationGroup) def platform(): """Platform-related configuration tasks""" @@ -2207,7 +2257,7 @@ def platform(): platform.add_command(mlnx.mlnx) # 'firmware' subgroup ("config platform firmware ...") -@platform.group() +@platform.group(cls=AbbreviationGroup) def firmware(): """Firmware configuration tasks""" pass @@ -2252,12 +2302,12 @@ def update(args): # 'watermark' group ("show watermark telemetry interval") # -@config.group() +@config.group(cls=AbbreviationGroup) def watermark(): """Configure watermark """ pass -@watermark.group() +@watermark.group(cls=AbbreviationGroup) def telemetry(): """Configure watermark telemetry""" 
pass @@ -2274,7 +2324,7 @@ def interval(interval): # 'interface_naming_mode' subgroup ('config interface_naming_mode ...') # -@config.group('interface_naming_mode') +@config.group(cls=AbbreviationGroup, name='interface_naming_mode') def interface_naming_mode(): """Modify interface naming mode for interacting with SONiC CLI""" pass @@ -2289,7 +2339,7 @@ def naming_mode_alias(): """Set CLI interface naming mode to ALIAS (Vendor port alias)""" set_interface_naming_mode('alias') -@config.group() +@config.group(cls=AbbreviationGroup) def ztp(): """ Configure Zero Touch Provisioning """ if os.path.isfile('/usr/bin/ztp') is False: @@ -2326,7 +2376,7 @@ def enable(enable): # # 'syslog' group ('config syslog ...') # -@config.group('syslog') +@config.group(cls=AbbreviationGroup, name='syslog') @click.pass_context def syslog_group(ctx): """Syslog server configuration tasks""" @@ -2378,7 +2428,7 @@ def del_syslog_server(ctx, syslog_ip_address): # # 'ntp' group ('config ntp ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def ntp(ctx): """NTP server configuration tasks""" @@ -2430,7 +2480,7 @@ def del_ntp_server(ctx, ntp_ip_address): # # 'sflow' group ('config sflow ...') # -@config.group() +@config.group(cls=AbbreviationGroup) @click.pass_context def sflow(ctx): """sFlow-related configuration tasks""" @@ -2511,7 +2561,7 @@ def is_valid_sample_rate(rate): # # 'sflow interface' group # -@sflow.group() +@sflow.group(cls=AbbreviationGroup) @click.pass_context def interface(ctx): """Configure sFlow settings for an interface""" @@ -2586,7 +2636,7 @@ def sample_rate(ctx, ifname, rate): # # 'sflow collector' group # -@sflow.group() +@sflow.group(cls=AbbreviationGroup) @click.pass_context def collector(ctx): """Add/Delete a sFlow collector""" @@ -2654,7 +2704,7 @@ def del_collector(ctx, name): # # 'sflow agent-id' group # -@sflow.group('agent-id') +@sflow.group(cls=AbbreviationGroup, name='agent-id') @click.pass_context def agent_id(ctx): """Add/Delete 
a sFlow agent""" @@ -2726,7 +2776,7 @@ def feature_status(name, state): # # 'container' group ('config container ...') # -@config.group(name='container', invoke_without_command=False) +@config.group(cls=AbbreviationGroup, name='container', invoke_without_command=False) def container(): """Modify configuration of containers""" pass @@ -2734,7 +2784,7 @@ def container(): # # 'feature' group ('config container feature ...') # -@container.group(name='feature', invoke_without_command=False) +@container.group(cls=AbbreviationGroup, name='feature', invoke_without_command=False) def feature(): """Modify configuration of container features""" pass From f90c9ccbed9355bf9cf2e6c80c28ce3b5c279123 Mon Sep 17 00:00:00 2001 From: rkdevi27 <54701695+rkdevi27@users.noreply.github.com> Date: Thu, 30 Apr 2020 05:40:18 +0530 Subject: [PATCH 055/111] ssd_mitigation_changes (#829) * ssd_mitigation_changes * ssd_mitigation_changes * ssd_mitigation_changes --- scripts/fast-reboot | 7 +++++++ scripts/log_ssd_health | 7 +++++++ setup.py | 1 + 3 files changed, 15 insertions(+) create mode 100755 scripts/log_ssd_health diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 4313af5d0a..ec0ccae1b7 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -16,6 +16,7 @@ ASSISTANT_SCRIPT="/usr/bin/neighbor_advertiser" DEVPATH="/usr/share/sonic/device" PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin" +LOG_SSD_HEALTH="/usr/bin/log_ssd_health" # Require 100M available on the hard drive for warm reboot temp files, # Size is in 1K blocks: @@ -424,6 +425,12 @@ fi # service will go down and we cannot recover from it. set +e +if [ -x ${LOG_SSD_HEALTH} ]; then + debug "Collecting logs to check ssd health before fast-reboot..." + ${LOG_SSD_HEALTH} +fi + + # Kill nat docker after saving the conntrack table debug "Stopping nat ..." 
/usr/bin/dump_nat_entries.py diff --git a/scripts/log_ssd_health b/scripts/log_ssd_health new file mode 100755 index 0000000000..f0055f80f3 --- /dev/null +++ b/scripts/log_ssd_health @@ -0,0 +1,7 @@ +#! /bin/bash + +smartctl -a /dev/sda > /tmp/smartctl +if [ -f /tmp/smartctl ];then + logger -f /tmp/smartctl +fi + diff --git a/setup.py b/setup.py index f2abbefc38..2924566c7d 100644 --- a/setup.py +++ b/setup.py @@ -79,6 +79,7 @@ 'scripts/intfutil', 'scripts/intfstat', 'scripts/lldpshow', + 'scripts/log_ssd_health', 'scripts/mmuconfig', 'scripts/natclear', 'scripts/natconfig', From 660a2874e81f0ef8b444ba5e5fdbfeb5ae39e4a4 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Fri, 1 May 2020 03:38:24 +0800 Subject: [PATCH 056/111] [psushow] Add a column to display LED color to show platform psustatus output (#886) --- scripts/psushow | 5 +++-- sonic-utilities-tests/mock_tables/state_db.json | 6 ++++-- sonic-utilities-tests/psu_test.py | 6 +++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/scripts/psushow b/scripts/psushow index 242f10d374..60d7123346 100755 --- a/scripts/psushow +++ b/scripts/psushow @@ -35,7 +35,7 @@ def psu_status_show(index): else: psu_ids = [index] - header = ['PSU', 'Status'] + header = ['PSU', 'Status', 'LED'] status_table = [] for psu in psu_ids: @@ -51,7 +51,8 @@ def psu_status_show(index): msg = 'OK' if oper_status == 'true' else "NOT OK" else: msg = 'NOT PRESENT' - status_table.append([psu_name, msg]) + led_status = db.get(db.STATE_DB, 'PSU_INFO|{}'.format(psu_name), 'led_status') + status_table.append([psu_name, msg, led_status]) if status_table: print tabulate(status_table, header, tablefmt="simple") diff --git a/sonic-utilities-tests/mock_tables/state_db.json b/sonic-utilities-tests/mock_tables/state_db.json index 14f60801a7..924600ae4b 100644 --- a/sonic-utilities-tests/mock_tables/state_db.json +++ b/sonic-utilities-tests/mock_tables/state_db.json @@ -53,11 +53,13 
@@ }, "PSU_INFO|PSU 1": { "presence": "true", - "status": "true" + "status": "true", + "led_status": "green" }, "PSU_INFO|PSU 2": { "presence": "true", - "status": "true" + "status": "true", + "led_status": "green" }, "SWITCH_CAPABILITY|switch": { "MIRROR": "true", diff --git a/sonic-utilities-tests/psu_test.py b/sonic-utilities-tests/psu_test.py index 2c8c7e2661..b4dd740e45 100644 --- a/sonic-utilities-tests/psu_test.py +++ b/sonic-utilities-tests/psu_test.py @@ -36,9 +36,9 @@ def test_verbose(self): def test_single_psu(self): runner = CliRunner() result = runner.invoke(show.cli.commands["platform"].commands["psustatus"], ["--index=1"]) - expected = """PSU Status ------ -------- -PSU 1 OK + expected = """PSU Status LED +----- -------- ----- +PSU 1 OK green """ assert result.output == expected From 24e029b12258bc12b3bcfccf49228459a1ad8bc5 Mon Sep 17 00:00:00 2001 From: Renuka Manavalan <47282725+renukamanavalan@users.noreply.github.com> Date: Sat, 2 May 2020 11:51:45 -0700 Subject: [PATCH 057/111] Improved route_check tool and adopt to 20191130 image. (#898) * Changes: 1) Add syslog support. 2) Enable forever periodic scan 3) Skip link local addresses 4) Skip eth0 routes 3) Adopt to 20191130 changes 3.1) APPl-DB INTF_TABLE may not have IP address 3.2) nexthop is never empty * No logical code change. A small name change. 
* 1) Adopt to 201811 -- Filter out 'lo' & 'docker0' in addition to 'eth0' as local routes 2) Ensure to read route entry w/o prefix, if not present with prefix --- scripts/route_check.py | 196 +++++++++++++++++++++++++---------------- 1 file changed, 119 insertions(+), 77 deletions(-) diff --git a/scripts/route_check.py b/scripts/route_check.py index 14b93279c2..ba64bf9bbe 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -2,10 +2,14 @@ # -*- coding: utf-8 -*- import os +import re import sys -import getopt +import argparse import ipaddress +import syslog import json +import time +from enum import Enum from swsssdk import ConfigDBConnector os.environ['PYTHONUNBUFFERED']='True' @@ -13,30 +17,36 @@ PREFIX_SEPARATOR = '/' IPV6_SEPARATOR = ':' -# Modes of operation from quiet to noisy -MODE_QUIET = 0 -MODE_ERR = 1 -MODE_INFO = 2 -MODE_DEBUG = 3 - -mode = MODE_ERR - -def set_mode(m): - global mode - if (m == 'QUIET'): - mode = MODE_QUIET - elif (m == 'ERR'): - mode = MODE_ERR - elif (m == 'INFO'): - mode = MODE_INFO - elif (m == 'DEBUG'): - mode = MODE_DEBUG - return mode +MIN_SCAN_INTERVAL = 10 # Every 10 seconds +MAX_SCAN_INTERVAL = 3600 # An hour + +class Level(Enum): + ERR = 'ERR' + INFO = 'INFO' + DEBUG = 'DEBUG' + + def __str__(self): + return self.value + +report_level = syslog.LOG_ERR + +def set_level(lvl): + global report_level + + if (lvl == Level.INFO): + report_level = syslog.LOG_INFO + + if (lvl == Level.DEBUG): + report_level = syslog.LOG_DEBUG + def print_message(lvl, *args): - if (lvl <= mode): + if (lvl <= report_level): + msg = "" for arg in args: - print arg + msg += " " + str(arg) + print(msg) + syslog.syslog(lvl, msg) def add_prefix(ip): if ip.find(IPV6_SEPARATOR) == -1: @@ -48,12 +58,9 @@ def add_prefix(ip): def add_prefix_ifnot(ip): return ip if ip.find(PREFIX_SEPARATOR) != -1 else add_prefix(ip) -def ip_subnet(ip): - if ip.find(":") == -1: - net = ipaddress.IPv4Network(ip, False) - else: - net = ipaddress.IPv6Network(ip, False) - 
return net.with_prefixlen +def is_local(ip): + t = ipaddress.ip_address(ip.split("/")[0].decode('utf-8')) + return t.is_link_local def cmps(s1, s2): if (s1 == s2): @@ -93,103 +100,138 @@ def do_diff(t1, t2): def get_routes(): db = ConfigDBConnector() db.db_connect('APPL_DB') - print_message(MODE_DEBUG, "APPL DB connected for routes") + print_message(syslog.LOG_DEBUG, "APPL DB connected for routes") keys = db.get_keys('ROUTE_TABLE') - print_message(MODE_DEBUG, json.dumps({"ROUTE_TABLE": keys}, indent=4)) valid_rt = [] - skip_rt = [] for k in keys: - if db.get_entry('ROUTE_TABLE', k)['nexthop'] != '': - valid_rt.append(add_prefix_ifnot(k)) - else: - skip_rt.append(k) + if not is_local(k): + valid_rt.append(add_prefix_ifnot(k.lower())) - print_message(MODE_INFO, json.dumps({"skipped_routes" : skip_rt}, indent=4)) + print_message(syslog.LOG_DEBUG, json.dumps({"ROUTE_TABLE": sorted(valid_rt)}, indent=4)) return sorted(valid_rt) def get_route_entries(): db = ConfigDBConnector() db.db_connect('ASIC_DB') - print_message(MODE_DEBUG, "ASIC DB connected") + print_message(syslog.LOG_DEBUG, "ASIC DB connected") keys = db.get_keys('ASIC_STATE:SAI_OBJECT_TYPE_ROUTE_ENTRY', False) - print_message(MODE_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": keys}, indent=4)) rt = [] for k in keys: - rt.append(k.split("\"", -1)[3]) + e = k.lower().split("\"", -1)[3] + if not is_local(e): + rt.append(e) + print_message(syslog.LOG_DEBUG, json.dumps({"ASIC_ROUTE_ENTRY": sorted(rt)}, indent=4)) return sorted(rt) def get_interfaces(): db = ConfigDBConnector() db.db_connect('APPL_DB') - print_message(MODE_DEBUG, "APPL DB connected for interfaces") + print_message(syslog.LOG_DEBUG, "APPL DB connected for interfaces") intf = [] keys = db.get_keys('INTF_TABLE') - print_message(MODE_DEBUG, json.dumps({"APPL_DB_INTF": keys}, indent=4)) for k in keys: - subk = k.split(':', -1) - alias = subk[0] - ip_prefix = ":".join(subk[1:]) - ip = add_prefix(ip_prefix.split("/", -1)[0]) - if (subk[0] == "eth0") or (subk[0] 
== "docker0"): + lst = re.split(':', k.lower(), maxsplit=1) + if len(lst) == 1: + # No IP address in key; ignore continue - if (subk[0] != "lo"): - intf.append(ip_subnet(ip_prefix)) - intf.append(ip) + + ip = add_prefix(lst[1].split("/", -1)[0]) + if not is_local(ip): + intf.append(ip) + + print_message(syslog.LOG_DEBUG, json.dumps({"APPL_DB_INTF": sorted(intf)}, indent=4)) return sorted(intf) +def filter_out_local_interfaces(keys): + rt = [] + local_if = set(['eth0', 'lo', 'docker0']) + + db = ConfigDBConnector() + db.db_connect('APPL_DB') + + for k in keys: + e = db.get_entry('ROUTE_TABLE', k) + if not e: + # Prefix might have been added. So try w/o it. + e = db.get_entry('ROUTE_TABLE', k.split("/")[0]) + if not e or (e['ifname'] not in local_if): + rt.append(k) + + return rt + def check_routes(): - intf_miss = [] - rt_miss = [] - re_miss = [] + intf_appl_miss = [] + rt_appl_miss = [] + rt_asic_miss = [] results = {} err_present = False - rt_miss, re_miss = do_diff(get_routes(), get_route_entries()) - intf_miss, re_miss = do_diff(get_interfaces(), re_miss) + rt_appl = get_routes() + rt_asic = get_route_entries() + intf_appl = get_interfaces() + + # Diff APPL-DB routes & ASIC-DB routes + rt_appl_miss, rt_asic_miss = do_diff(rt_appl, rt_asic) - if (len(rt_miss) != 0): - results["missed_ROUTE_TABLE_routes"] = rt_miss + # Check missed ASIC routes against APPL-DB INTF_TABLE + _, rt_asic_miss = do_diff(intf_appl, rt_asic_miss) + + # Check APPL-DB INTF_TABLE with ASIC table route entries + intf_appl_miss, _ = do_diff(intf_appl, rt_asic) + + if (len(rt_appl_miss) != 0): + rt_appl_miss = filter_out_local_interfaces(rt_appl_miss) + + if (len(rt_appl_miss) != 0): + results["missed_ROUTE_TABLE_routes"] = rt_appl_miss err_present = True - if (len(intf_miss) != 0): - results["missed_INTF_TABLE_entries"] = intf_miss + if (len(intf_appl_miss) != 0): + results["missed_INTF_TABLE_entries"] = intf_appl_miss err_present = True - if (len(re_miss) != 0): - 
results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = re_miss + if (len(rt_asic_miss) != 0): + results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss err_present = True if err_present: - print_message(MODE_ERR, "results: {", json.dumps(results, indent=4), "}") - print_message(MODE_ERR, "Failed. Look at reported mismatches above") + print_message(syslog.LOG_ERR, "results: {", json.dumps(results, indent=4), "}") + print_message(syslog.LOG_ERR, "Failed. Look at reported mismatches above") return -1 else: - print_message(MODE_ERR, "All good!") + print_message(syslog.LOG_INFO, "All good!") return 0 -def usage(): - print sys.argv[0], "[-m ]" - print sys.argv[0], "[--mode=]" - sys.exit(-1) - def main(argv): - try: - opts, argv = getopt.getopt(argv, "m:", ["mode="]) - except getopt.GetoptError: - usage() + interval = 0 + parser=argparse.ArgumentParser(description="Verify routes between APPL-DB & ASIC-DB are in sync") + parser.add_argument('-m', "--mode", type=Level, choices=list(Level), default='ERR') + parser.add_argument("-i", "--interval", type=int, default=0, help="Scan interval in seconds") + args = parser.parse_args() + + set_level(args.mode) + + if args.interval: + if (args.interval < MIN_SCAN_INTERVAL): + interval = MIN_SCAN_INTERVAL + elif (args.interval > MAX_SCAN_INTERVAL): + interval = MAX_SCAN_INTERVAL + else: + interval = args.interval - for opt, arg in opts: - if opt in ("-m", "--mode"): - set_mode(arg) + while True: + ret = check_routes() - ret = check_routes() - sys.exit(ret) + if interval: + time.sleep(interval) + else: + sys.exit(ret) if __name__ == "__main__": From 9e9d99d0d4652427dbb0f7c5f3bf551945e2a834 Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Sat, 2 May 2020 13:51:15 -0700 Subject: [PATCH 058/111] [config] Add support for multi-ASIC devices (#877) * [Phase 1] Multi ASIC config command changes, db_mgrator.py script updates for handing namespace. 
* Fixes and comment updates * Comments addressed + added support for user to input the config files per namespace also. * Updates per comments + based on the updated SonicV2Connector/ConfigDBConnector class design * Review comments update. * Help string updated for config save/reload/load --- config/main.py | 273 ++++++++++++++++++++++++++++++++++++----- scripts/db_migrator.py | 25 +++- 2 files changed, 260 insertions(+), 38 deletions(-) diff --git a/config/main.py b/config/main.py index e6a459f0e1..617d155d95 100755 --- a/config/main.py +++ b/config/main.py @@ -12,8 +12,7 @@ import sonic_device_util import ipaddress -from swsssdk import ConfigDBConnector -from swsssdk import SonicV2Connector +from swsssdk import ConfigDBConnector, SonicV2Connector, SonicDBConfig from minigraph import parse_device_desc_xml from click_default_group import DefaultGroup @@ -28,6 +27,8 @@ SYSLOG_IDENTIFIER = "config" VLAN_SUB_INTERFACE_SEPARATOR = '.' ASIC_CONF_FILENAME = 'asic.conf' +DEFAULT_CONFIG_DB_FILE = '/etc/sonic/config_db.json' +NAMESPACE_PREFIX = 'asic' INIT_CFG_FILE = '/etc/sonic/init_cfg.json' @@ -162,6 +163,48 @@ def run_command(command, display_cmd=False, ignore_error=False): if proc.returncode != 0 and not ignore_error: sys.exit(proc.returncode) +# API to check if this is a multi-asic device or not. +def is_multi_asic(): + num_asics = _get_num_asic() + + if num_asics > 1: + return True + else: + return False + +"""In case of Multi-Asic platform, Each ASIC will have a linux network namespace created. + So we loop through the databases in different namespaces and depending on the sub_role + decide whether this is a front end ASIC/namespace or a back end one. 
+""" +def get_all_namespaces(): + front_ns = [] + back_ns = [] + num_asics = _get_num_asic() + + if is_multi_asic(): + for asic in range(num_asics): + namespace = "{}{}".format(NAMESPACE_PREFIX, asic) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + + metadata = config_db.get_table('DEVICE_METADATA') + if metadata['localhost']['sub_role'] == 'FrontEnd': + front_ns.append(namespace) + elif metadata['localhost']['sub_role'] == 'BackEnd': + back_ns.append(namespace) + + return {'front_ns': front_ns, 'back_ns': back_ns} + +# Validate whether a given namespace name is valid in the device. +def validate_namespace(namespace): + if not is_multi_asic(): + return True + + namespaces = get_all_namespaces() + if namespace in namespaces['front_ns'] + namespaces['back_ns']: + return True + else: + return False def interface_alias_to_name(interface_alias): """Return default interface name if alias name is given as argument @@ -582,6 +625,10 @@ def is_ipaddress(val): @click.group(cls=AbbreviationGroup, context_settings=CONTEXT_SETTINGS) def config(): """SONiC command line - 'config' command""" + + # Load the global config file database_global.json once. 
+ SonicDBConfig.load_sonic_global_db_config() + if os.geteuid() != 0: exit("Root privileges are required for this operation") @@ -593,34 +640,148 @@ def config(): @config.command() @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, - expose_value=False, prompt='Existing file will be overwritten, continue?') -@click.argument('filename', default='/etc/sonic/config_db.json', type=click.Path()) + expose_value=False, prompt='Existing files will be overwritten, continue?') +@click.argument('filename', required=False) def save(filename): - """Export current config DB to a file on disk.""" - command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, filename) - run_command(command, display_cmd=True) + """Export current config DB to a file on disk.\n + : Names of configuration file(s) to save, separated by comma with no spaces in between + """ + num_asic = _get_num_asic() + cfg_files = [] + + num_cfg_file = 1 + if is_multi_asic(): + num_cfg_file += num_asic + + # If the user give the filename[s], extract the file names. + if filename is not None: + cfg_files = filename.split(',') + + if len(cfg_files) != num_cfg_file: + click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) + return + + """In case of multi-asic mode we have additional config_db{NS}.json files for + various namespaces created per ASIC. {NS} is the namespace index. + """ + for inst in range(-1, num_cfg_file-1): + #inst = -1, refers to the linux host where there is no namespace. 
+ if inst is -1: + namespace = None + else: + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) + + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + else: + if namespace is None: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) + + if namespace is None: + command = "{} -d --print-data > {}".format(SONIC_CFGGEN_PATH, file) + else: + command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file) + + run_command(command, display_cmd=True) @config.command() @click.option('-y', '--yes', is_flag=True) -@click.argument('filename', default='/etc/sonic/config_db.json', type=click.Path(exists=True)) +@click.argument('filename', required=False) def load(filename, yes): - """Import a previous saved config DB dump file.""" + """Import a previous saved config DB dump file. + : Names of configuration file(s) to load, separated by comma with no spaces in between + """ + if filename is None: + message = 'Load config from the default config file(s) ?' + else: + message = 'Load config from the file(s) {} ?'.format(filename) + if not yes: - click.confirm('Load config from the file %s?' % filename, abort=True) - command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) - run_command(command, display_cmd=True) + click.confirm(message, abort=True) + + num_asic = _get_num_asic() + cfg_files = [] + + num_cfg_file = 1 + if is_multi_asic(): + num_cfg_file += num_asic + + # If the user give the filename[s], extract the file names. + if filename is not None: + cfg_files = filename.split(',') + + if len(cfg_files) != num_cfg_file: + click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) + return + + """In case of multi-asic mode we have additional config_db{NS}.json files for + various namespaces created per ASIC. {NS} is the namespace index. 
+ """ + for inst in range(-1, num_cfg_file-1): + #inst = -1, refers to the linux host where there is no namespace. + if inst is -1: + namespace = None + else: + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) + + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + else: + if namespace is None: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) + + # if any of the config files in linux host OR namespace is not present, return + if not os.path.isfile(file): + click.echo("The config_db file {} doesn't exist".format(file)) + return + + if namespace is None: + command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file) + else: + command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file) + + run_command(command, display_cmd=True) + @config.command() @click.option('-y', '--yes', is_flag=True) @click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.') -@click.argument('filename', default='/etc/sonic/config_db.json', type=click.Path(exists=True)) +@click.argument('filename', required=False) def reload(filename, yes, load_sysinfo): - """Clear current configuration and import a previous saved config DB dump file.""" + """Clear current configuration and import a previous saved config DB dump file. + : Names of configuration file(s) to load, separated by comma with no spaces in between + """ + if filename is None: + message = 'Clear current config and reload config from the default config file(s) ?' + else: + message = 'Clear current config and reload config from the file(s) {} ?'.format(filename) + if not yes: - click.confirm('Clear current config and reload config from the file %s?' 
% filename, abort=True) + click.confirm(message, abort=True) log_info("'reload' executing...") + num_asic = _get_num_asic() + cfg_files = [] + + num_cfg_file = 1 + if is_multi_asic(): + num_cfg_file += num_asic + + # If the user give the filename[s], extract the file names. + if filename is not None: + cfg_files = filename.split(',') + + if len(cfg_files) != num_cfg_file: + click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) + return + if load_sysinfo: command = "{} -j {} -v DEVICE_METADATA.localhost.hwsku".format(SONIC_CFGGEN_PATH, filename) proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) @@ -634,26 +795,74 @@ def reload(filename, yes, load_sysinfo): #Stop services before config push log_info("'reload' stopping services...") _stop_services() - config_db = ConfigDBConnector() - config_db.connect() - client = config_db.get_redis_client(config_db.CONFIG_DB) - client.flushdb() - if load_sysinfo: - command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku) - run_command(command, display_cmd=True) - if os.path.isfile(INIT_CFG_FILE): - command = "{} -j {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, filename) - else: - command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) + """ In Single AISC platforms we have single DB service. In multi-ASIC platforms we have a global DB + service running in the host + DB services running in each ASIC namespace created per ASIC. 
+ In the below logic, we get all namespaces in this platform and add an empty namespace '' + denoting the current namespace which we are in ( the linux host ) + """ + for inst in range(-1, num_cfg_file-1): + # Get the namespace name, for linux host it is None + if inst is -1: + namespace = None + else: + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) - run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, 1) + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + else: + if namespace is None: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) - # Migrate DB contents to latest version - db_migrator='/usr/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - run_command(db_migrator + ' -o migrate') + #Check the file exists before proceeding. + if not os.path.isfile(file): + click.echo("The config_db file {} doesn't exist".format(file)) + continue + + if namespace is None: + config_db = ConfigDBConnector() + else: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + + config_db.connect() + client = config_db.get_redis_client(config_db.CONFIG_DB) + client.flushdb() + if load_sysinfo: + if namespace is None: + command = "{} -H -k {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku) + else: + command = "{} -H -k {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, cfg_hwsku, namespace) + run_command(command, display_cmd=True) + + # For the database service running in linux host we use the file user gives as input + # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, + # the default config_db.json format is used. 
+ + if namespace is None: + if os.path.isfile(INIT_CFG_FILE): + command = "{} -j {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file) + else: + command = "{} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, file) + else: + if os.path.isfile(INIT_CFG_FILE): + command = "{} -j {} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, INIT_CFG_FILE, file, namespace) + else: + command = "{} -j {} -n {} --write-to-db".format(SONIC_CFGGEN_PATH, file, namespace) + + run_command(command, display_cmd=True) + client.set(config_db.INIT_INDICATOR, 1) + + # Migrate DB contents to latest version + db_migrator='/usr/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + if namespace is None: + command = "{} -o migrate".format(db_migrator) + else: + command = "{} -o migrate -n {}".format(db_migrator, namespace) + run_command(command, display_cmd=True) # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 8d61c29203..4ea89e0bff 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -4,7 +4,7 @@ import sys import argparse import syslog -from swsssdk import ConfigDBConnector +from swsssdk import ConfigDBConnector, SonicDBConfig import sonic_device_util @@ -24,7 +24,7 @@ def log_error(msg): class DBMigrator(): - def __init__(self, socket=None): + def __init__(self, namespace, socket=None): """ Version string format: version___ @@ -46,10 +46,12 @@ def __init__(self, socket=None): if socket: db_kwargs['unix_socket_path'] = socket - self.configDB = ConfigDBConnector(**db_kwargs) + if namespace is None: + self.configDB = ConfigDBConnector(**db_kwargs) + else: + self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs) self.configDB.db_connect('CONFIG_DB') - def migrate_pfc_wd_table(self): ''' Migrate all data entries from table PFC_WD_TABLE to PFC_WD @@ 
-291,14 +293,25 @@ def main(): required = False, help = 'the unix socket that the desired database listens on', default = None ) + parser.add_argument('-n', + dest='namespace', + metavar='asic namespace', + type = str, + required = False, + help = 'The asic namespace whose DB instance we need to connect', + default = None ) args = parser.parse_args() operation = args.operation socket_path = args.socket + namespace = args.namespace + + if args.namespace is not None: + SonicDBConfig.load_sonic_global_db_config(namespace=args.namespace) if socket_path: - dbmgtr = DBMigrator(socket=socket_path) + dbmgtr = DBMigrator(namespace, socket=socket_path) else: - dbmgtr = DBMigrator() + dbmgtr = DBMigrator(namespace) result = getattr(dbmgtr, operation)() if result: From fa19768efe22f14476c47baa05f7daa66406dd46 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sun, 3 May 2020 17:45:56 -0700 Subject: [PATCH 059/111] [show] Fix abbreviations for 'show ip bgp ...' commands (#901) --- show/bgp_frr_v4.py | 4 ++-- show/bgp_frr_v6.py | 4 ++-- show/bgp_quagga_v4.py | 4 ++-- show/bgp_quagga_v6.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index ce946846bf..5f2831c05a 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,5 +1,5 @@ import click -from show.main import ip, run_command, get_bgp_summary_extended +from show.main import AliasedGroup, ip, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ip.group() +@ip.group(cls=AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" pass diff --git a/show/bgp_frr_v6.py b/show/bgp_frr_v6.py index 8c3ad70dde..f199ac60b9 100644 --- a/show/bgp_frr_v6.py +++ b/show/bgp_frr_v6.py @@ -1,5 +1,5 @@ import click -from show.main import ipv6, run_command, get_bgp_summary_extended +from 
show.main import AliasedGroup, ipv6, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ipv6.group() +@ipv6.group(cls=AliasedGroup) def bgp(): """Show IPv6 BGP (Border Gateway Protocol) information""" pass diff --git a/show/bgp_quagga_v4.py b/show/bgp_quagga_v4.py index 73075e5aec..bddcf891d9 100644 --- a/show/bgp_quagga_v4.py +++ b/show/bgp_quagga_v4.py @@ -1,5 +1,5 @@ import click -from show.main import ip, run_command, get_bgp_summary_extended +from show.main import AliasedGroup, ip, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ip.group() +@ip.group(cls=AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" pass diff --git a/show/bgp_quagga_v6.py b/show/bgp_quagga_v6.py index f5e8ceef23..78c06988c9 100644 --- a/show/bgp_quagga_v6.py +++ b/show/bgp_quagga_v6.py @@ -1,5 +1,5 @@ import click -from show.main import ipv6, run_command, get_bgp_summary_extended +from show.main import AliasedGroup, ipv6, run_command, get_bgp_summary_extended ############################################################################### @@ -9,7 +9,7 @@ ############################################################################### -@ipv6.group() +@ipv6.group(cls=AliasedGroup) def bgp(): """Show IPv6 BGP (Border Gateway Protocol) information""" pass From 5f18b8da4df60d19af186ef7d73a6b39c7061de4 Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Wed, 6 May 2020 12:37:28 -0700 Subject: [PATCH 060/111] [Vnet] Fix NameError for 'swsssdk' and align output (#902) * Fix NameError for 'swsssdk', align neig output --- show/main.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/show/main.py b/show/main.py index 
9372eb2fad..6692ff99e1 100755 --- a/show/main.py +++ b/show/main.py @@ -3026,7 +3026,7 @@ def neighbors(): else: vnet_intfs[vnet_name] = [k] - appl_db = swsssdk.SonicV2Connector() + appl_db = SonicV2Connector() appl_db.connect(appl_db.APPL_DB) # Fetching data from appl_db for neighbors @@ -3053,6 +3053,9 @@ def neighbors(): click.echo(tabulate(table, header)) click.echo("\n") + if not bool(vnet_intfs): + click.echo(tabulate(table, header)) + @vnet.group() def routes(): """Show vnet routes related information""" @@ -3061,7 +3064,7 @@ def routes(): @routes.command() def all(): """Show all vnet routes""" - appl_db = swsssdk.SonicV2Connector() + appl_db = SonicV2Connector() appl_db.connect(appl_db.APPL_DB) header = ['vnet name', 'prefix', 'nexthop', 'interface'] @@ -3104,7 +3107,7 @@ def all(): @routes.command() def tunnel(): """Show vnet tunnel routes""" - appl_db = swsssdk.SonicV2Connector() + appl_db = SonicV2Connector() appl_db.connect(appl_db.APPL_DB) header = ['vnet name', 'prefix', 'endpoint', 'mac address', 'vni'] From 98b0cb668c76332da7f17707d5ec2992dc235fc7 Mon Sep 17 00:00:00 2001 From: arlakshm <55814491+arlakshm@users.noreply.github.com> Date: Wed, 6 May 2020 18:11:58 -0700 Subject: [PATCH 061/111] [config] Support load_minigraph command for multi NPU platform (#896) - Modify the load_minigraph command handler to support multi NPU platforms --- config/main.py | 59 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 18 deletions(-) diff --git a/config/main.py b/config/main.py index 617d155d95..83dc4ad7a0 100755 --- a/config/main.py +++ b/config/main.py @@ -36,6 +36,7 @@ SYSTEMCTL_ACTION_RESTART="restart" SYSTEMCTL_ACTION_RESET_FAILED="reset-failed" +DEFAULT_NAMESPACE = '' # ========================== Syslog wrappers ========================== def log_debug(msg): @@ -632,6 +633,8 @@ def config(): if os.geteuid() != 0: exit("Root privileges are required for this operation") + SonicDBConfig.load_sonic_global_db_config() + 
config.add_command(aaa.aaa) config.add_command(aaa.tacacs) @@ -915,26 +918,46 @@ def load_minigraph(): log_info("'load_minigraph' stopping services...") _stop_services() - config_db = ConfigDBConnector() - config_db.connect() - client = config_db.get_redis_client(config_db.CONFIG_DB) - client.flushdb() - if os.path.isfile('/etc/sonic/init_cfg.json'): - command = "{} -H -m -j /etc/sonic/init_cfg.json --write-to-db".format(SONIC_CFGGEN_PATH) - else: - command = "{} -H -m --write-to-db".format(SONIC_CFGGEN_PATH) - run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, 1) - if device_type != 'MgmtToRRouter': - run_command('pfcwd start_default', display_cmd=True) + # For Single Asic platform the namespace list has the empty string + # for mulit Asic platform the empty string to generate the config + # for host + namespace_list = [DEFAULT_NAMESPACE] + num_npus = sonic_device_util.get_num_npus() + if num_npus > 1: + namespace_list += sonic_device_util.get_namespaces() + + for namespace in namespace_list: + if namespace is DEFAULT_NAMESPACE: + config_db = ConfigDBConnector() + cfggen_namespace_option = " " + ns_cmd_prefix = " " + else: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + cfggen_namespace_option = " -n {}".format(namespace) + ns_cmd_prefix = "sudo ip netns exec {}".format(namespace) + config_db.connect() + client = config_db.get_redis_client(config_db.CONFIG_DB) + client.flushdb() + if os.path.isfile('/etc/sonic/init_cfg.json'): + command = "{} -H -m -j /etc/sonic/init_cfg.json {} --write-to-db".format(SONIC_CFGGEN_PATH, cfggen_namespace_option) + else: + command = "{} -H -m --write-to-db {} ".format(SONIC_CFGGEN_PATH,cfggen_namespace_option) + run_command(command, display_cmd=True) + client.set(config_db.INIT_INDICATOR, 1) + + # These commands are not run for host on multi asic platform + if num_npus == 1 or namespace is not DEFAULT_NAMESPACE: + if device_type != 'MgmtToRRouter': + run_command('{} pfcwd 
start_default'.format(ns_cmd_prefix), display_cmd=True) + run_command("{} config qos reload".format(ns_cmd_prefix), display_cmd=True) + + # Write latest db version string into db + db_migrator='/usr/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) + if os.path.isfile('/etc/sonic/acl.json'): run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) - run_command("config qos reload", display_cmd=True) - - # Write latest db version string into db - db_migrator='/usr/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - run_command(db_migrator + ' -o set_version') # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them From af93c5ea4d4689153472e83868def9ba3be6ec4f Mon Sep 17 00:00:00 2001 From: Travis Van Duyn Date: Fri, 8 May 2020 15:36:38 -0700 Subject: [PATCH 062/111] [show] Add 'errors' and 'rates' subcommands to 'show interfaces counters' group (#900) Co-authored-by: Travis Van Duyn --- doc/Command-Reference.md | 29 ++++++++++++ scripts/portstat | 100 +++++++++++++++++++++++++++------------ show/main.py | 22 +++++++++ 3 files changed, 121 insertions(+), 30 deletions(-) mode change 100755 => 100644 scripts/portstat diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 612158d8b1..ac16d496a7 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2298,6 +2298,8 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte - Usage: ``` show interfaces counters [-a|--printall] [-p|--period ] + show interfaces counters errors + show interfaces counters rates show interfaces counters rif [-p|--period ] ``` @@ -2315,6 +2317,33 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte Ethernet24 U 33,543,533,441 36.59 MB/s 0.71% 0 1,613 0 43,066,076,370 
49.92 MB/s 0.97% 0 0 0 ``` +The "errors" subcommand is used to display the interface errors. + +- Example: + ``` + admin@str-s6000-acs-11:~$ show interface counters errors + IFACE STATE RX_ERR RX_DRP RX_OVR TX_ERR TX_DRP TX_OVR + ----------- ------- -------- -------- -------- -------- -------- -------- + Ethernet0 U 0 4 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 + Ethernet8 U 0 1 0 0 0 0 + Ethernet12 U 0 0 0 0 0 0 +``` + +The "rates" subcommand is used to display only the interface rates. + +- Example: + ``` + admin@str-s6000-acs-11:/usr/bin$ show int counters rates + IFACE STATE RX_OK RX_BPS RX_PPS RX_UTIL TX_OK TX_BPS TX_PPS TX_UTIL + ----------- ------- ------- -------- -------- --------- ------- -------- -------- --------- + Ethernet0 U 467510 N/A N/A N/A 466488 N/A N/A N/A + Ethernet4 U 469679 N/A N/A N/A 469245 N/A N/A N/A + Ethernet8 U 466660 N/A N/A N/A 465982 N/A N/A N/A + Ethernet12 U 466579 N/A N/A N/A 466318 N/A N/A N/A +``` + + The "rif" subcommand is used to display l3 interface counters. Layer 3 interfaces include router interfaces, portchannels and vlan interfaces. + +- Example: diff --git a/scripts/portstat b/scripts/portstat old mode 100755 new mode 100644 index 29cdbd0c62..3000f5469f --- a/scripts/portstat +++ b/scripts/portstat @@ -2,7 +2,7 @@ ##################################################################### # -# portstat is a tool for summarizing network statistics. +# portstat is a tool for summarizing network statistics. 
# ##################################################################### @@ -25,9 +25,10 @@ NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ tx_err, tx_drop, tx_ovr, rx_byt, tx_byt") header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_OK', 'TX_BPS', 'Tx_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] - -header = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', +header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] counter_bucket_dict = { 'SAI_PORT_STAT_IF_IN_UCAST_PKTS': 0, @@ -93,7 +94,7 @@ class Portstat(object): if counter_port_name_map is None: return cnstat_dict for port in natsorted(counter_port_name_map): - cnstat_dict[port] = get_counters(counter_port_name_map[port]) + cnstat_dict[port] = get_counters(counter_port_name_map[port]) return cnstat_dict def get_port_speed(self, port_name): @@ -127,41 +128,54 @@ class Portstat(object): else: return STATUS_NA - def cnstat_print(self, cnstat_dict, use_json, print_all): + def cnstat_print(self, cnstat_dict, use_json, print_all, errors_only, rates_only): """ Print the cnstat. 
""" table = [] + header = None for key, data in cnstat_dict.iteritems(): if key == 'time': continue if print_all: + header = header_all table.append((key, self.get_port_state(key), data.rx_ok, STATUS_NA, STATUS_NA, STATUS_NA, data.rx_err, data.rx_drop, data.rx_ovr, data.tx_ok, STATUS_NA, STATUS_NA, STATUS_NA, data.tx_err, data.tx_drop, data.tx_ovr)) + elif errors_only: + header = header_errors_only + table.append((key, self.get_port_state(key), + data.rx_err, data.rx_drop, data.rx_ovr, + data.tx_err, data.tx_drop, data.tx_ovr)) + elif rates_only: + header = header_rates_only + table.append((key, self.get_port_state(key), + data.rx_ok, STATUS_NA, STATUS_NA, STATUS_NA, + data.tx_ok, STATUS_NA, STATUS_NA, STATUS_NA)) else: + header = header_std table.append((key, self.get_port_state(key), data.rx_ok, STATUS_NA, STATUS_NA, data.rx_err, data.rx_drop, data.rx_ovr, data.tx_ok, STATUS_NA, STATUS_NA, data.tx_err, data.tx_drop, data.tx_ovr)) - if use_json: - print table_as_json(table, header_all if print_all else header) + print table_as_json(table, header) else: - print tabulate(table, header_all if print_all else header, tablefmt='simple', stralign='right') + print tabulate(table, header, tablefmt='simple', stralign='right') - def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, use_json, print_all): + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, use_json, print_all, errors_only, rates_only): """ Print the difference between two cnstat results. 
""" table = [] + header = None for key, cntr in cnstat_new_dict.iteritems(): if key == 'time': @@ -174,6 +188,7 @@ class Portstat(object): port_speed = self.get_port_speed(key) if print_all: + header = header_all if old_cntr is not None: table.append((key, self.get_port_state(key), ns_diff(cntr.rx_ok, old_cntr.rx_ok), @@ -206,21 +221,42 @@ class Portstat(object): cntr.tx_err, cntr.tx_drop, cntr.tx_ovr)) + elif errors_only: + header = header_errors_only + table.append((key, self.get_port_state(key), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + elif rates_only: + header = header_rates_only + table.append((key, self.get_port_state(key), + ns_diff(cntr.rx_ok, old_cntr.rx_ok), + STATUS_NA, + STATUS_NA, + STATUS_NA, + ns_diff(cntr.tx_ok, old_cntr.tx_ok), + STATUS_NA, + STATUS_NA, + STATUS_NA)) else: + header = header_std if old_cntr is not None: table.append((key, self.get_port_state(key), - ns_diff(cntr.rx_ok, old_cntr.rx_ok), - ns_brate(cntr.rx_byt, old_cntr.rx_byt, time_gap), - ns_util(cntr.rx_byt, old_cntr.rx_byt, time_gap), - ns_diff(cntr.rx_err, old_cntr.rx_err), - ns_diff(cntr.rx_drop, old_cntr.rx_drop), - ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), - ns_diff(cntr.tx_ok, old_cntr.tx_ok), - ns_brate(cntr.tx_byt, old_cntr.tx_byt, time_gap), - ns_util(cntr.tx_byt, old_cntr.tx_byt, time_gap), - ns_diff(cntr.tx_err, old_cntr.tx_err), - ns_diff(cntr.tx_drop, old_cntr.tx_drop), - ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) + ns_diff(cntr.rx_ok, old_cntr.rx_ok), + ns_brate(cntr.rx_byt, old_cntr.rx_byt, time_gap), + ns_util(cntr.rx_byt, old_cntr.rx_byt, time_gap), + ns_diff(cntr.rx_err, old_cntr.rx_err), + ns_diff(cntr.rx_drop, old_cntr.rx_drop), + ns_diff(cntr.rx_ovr, old_cntr.rx_ovr), + ns_diff(cntr.tx_ok, old_cntr.tx_ok), + ns_brate(cntr.tx_byt, old_cntr.tx_byt, time_gap), + 
ns_util(cntr.tx_byt, old_cntr.tx_byt, time_gap), + ns_diff(cntr.tx_err, old_cntr.tx_err), + ns_diff(cntr.tx_drop, old_cntr.tx_drop), + ns_diff(cntr.tx_ovr, old_cntr.tx_ovr))) else: table.append((key, self.get_port_state(key), cntr.rx_ok, @@ -239,10 +275,7 @@ class Portstat(object): if use_json: print table_as_json(table, header) else: - if print_all: - print tabulate(table, header_all, tablefmt='simple', stralign='right') - else: - print tabulate(table, header, tablefmt='simple', stralign='right') + print tabulate(table, header, tablefmt='simple', stralign='right') def main(): @@ -255,18 +288,22 @@ Examples: portstat -c -t test portstat -t test portstat -d -t test + portstat -e portstat portstat -r + portstat -R portstat -a portstat -p 20 """) + parser.add_argument('-a', '--all', action='store_true', help='Display all the stats counters') parser.add_argument('-c', '--clear', action='store_true', help='Copy & clear stats') parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats, either the uid or the specified tag') parser.add_argument('-D', '--delete-all', action='store_true', help='Delete all saved stats') + parser.add_argument('-e', '--errors', action='store_true', help='Display interface errors') parser.add_argument('-j', '--json', action='store_true', help='Display in JSON format') parser.add_argument('-r', '--raw', action='store_true', help='Raw stats (unmodified output of netstat)') - parser.add_argument('-a', '--all', action='store_true', help='Display all the stats counters') + parser.add_argument('-R', '--rate', action='store_true', help='Display interface rates') parser.add_argument('-t', '--tag', type=str, help='Save stats with name TAG', default=None) parser.add_argument('-p', '--period', type=int, help='Display stats over a specified period (in seconds).', default=0) args = parser.parse_args() @@ -274,6 +311,8 @@ Examples: save_fresh_stats = args.clear delete_saved_stats = args.delete delete_all_stats = args.delete_all + 
errors_only = args.errors + rates_only = args.rate use_json = args.json raw_stats = args.raw tag_name = args.tag @@ -274,6 +311,8 @@ save_fresh_stats = args.clear delete_saved_stats = args.delete delete_all_stats = args.delete_all + 
time.sleep(wait_time_in_seconds) print "The rates are calculated within %s seconds period" % wait_time_in_seconds cnstat_new_dict = portstat.get_cnstat() - portstat.cnstat_diff_print(cnstat_new_dict, cnstat_dict, use_json, print_all) + portstat.cnstat_diff_print(cnstat_new_dict, cnstat_dict, use_json, print_all, errors_only, rates_only) if __name__ == "__main__": main() diff --git a/show/main.py b/show/main.py index 6692ff99e1..05d0e37864 100755 --- a/show/main.py +++ b/show/main.py @@ -966,6 +966,28 @@ def counters(ctx, verbose, period, printall): run_command(cmd, display_cmd=verbose) +# 'errors' subcommand ("show interfaces counters errors") +@counters.command() +@click.option('-p', '--period') +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def errors(verbose, period): + """Show interface counters errors""" + cmd = "portstat -e" + if period is not None: + cmd += " -p {}".format(period) + run_command(cmd, display_cmd=verbose) + +# 'rates' subcommand ("show interfaces counters rates") +@counters.command() +@click.option('-p', '--period') +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def rates(verbose, period): + """Show interface counters rates""" + cmd = "portstat -R" + if period is not None: + cmd += " -p {}".format(period) + run_command(cmd, display_cmd=verbose) + # 'counters' subcommand ("show interfaces counters rif") @counters.command() @click.argument('interface', metavar='', required=False, type=str) From 0ba869031aabf156fac8397a66923cf7256fbc86 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sat, 9 May 2020 14:45:51 -0700 Subject: [PATCH 063/111] [config] Add 'interface transceiver' subgroup with 'lpmode' and 'reset' subcommands (#904) --- config/main.py | 51 ++++++++++++++++++++++++++++++++++++++++ doc/Command-Reference.md | 42 ++++++++++++++++++++++++++++++++- 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 83dc4ad7a0..709688cdf9 100755 --- 
a/config/main.py +++ b/config/main.py @@ -1970,6 +1970,57 @@ def remove(ctx, interface_name, ip_addr): except ValueError: ctx.fail("'ip_addr' is not valid.") +# +# 'transceiver' subgroup ('config interface transceiver ...') +# + +@interface.group(cls=AbbreviationGroup) +@click.pass_context +def transceiver(ctx): + """SFP transceiver configuration""" + pass + +# +# 'lpmode' subcommand ('config interface transceiver lpmode ...') +# + +@transceiver.command() +@click.argument('interface_name', metavar='', required=True) +@click.argument('state', metavar='(enable|disable)', type=click.Choice(['enable', 'disable'])) +@click.pass_context +def lpmode(ctx, interface_name, state): + """Enable/disable low-power mode for SFP transceiver module""" + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + if interface_name_is_valid(interface_name) is False: + ctx.fail("Interface name is invalid. Please enter a valid interface name!!") + + cmd = "sudo sfputil lpmode {} {}".format("on" if state == "enable" else "off", interface_name) + run_command(cmd) + +# +# 'reset' subcommand ('config interface transceiver reset ...') +# + +@transceiver.command() +@click.argument('interface_name', metavar='', required=True) +@click.pass_context +def reset(ctx, interface_name): + """Reset SFP transceiver module""" + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + if interface_name_is_valid(interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") + + cmd = "sudo sfputil reset {}".format(interface_name) + run_command(cmd) + # # 'vrf' subgroup ('config interface vrf ...') # diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index ac16d496a7..cdee6a6d0a 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2530,7 +2530,7 @@ This sub-section explains the following list of configuration on the interfaces. From 201904 release onwards, the “config interface” command syntax is changed and the format is as follows: -- config interface interface_subcommand +- config interface interface_subcommand i.e Interface name comes after the subcommand - Ex: config interface startup Ethernet63 @@ -2696,6 +2696,7 @@ This command is used to administratively shut down either the Physical interface *Versions <= 201811* ``` config interface shutdown (for 201811- version) + ``` - Example: @@ -2723,6 +2724,7 @@ This command is used for administratively bringing up the Physical interface or *Versions <= 201811* ``` config interface startup (for 201811- version) + ``` - Example: @@ -2761,6 +2763,44 @@ Dynamic breakout feature is yet to be supported in SONiC and hence uses cannot c - Example (Versions <= 201811): ``` admin@sonic:~$ sudo config interface Ethernet63 speed 40000 + + ``` + +**config interface transceiver lpmode** + +This command is used to enable or disable low-power mode for an SFP transceiver + +- Usage: + + ``` + config interface transceiver lpmode (enable | disable) + ``` + +- Examples: + + ``` + user@sonic~$ sudo config interface transceiver lpmode Ethernet0 enable + Enabling low-power mode for port Ethernet0... OK + + user@sonic~$ sudo config interface transceiver lpmode Ethernet0 disable + Disabling low-power mode for port Ethernet0... 
OK + ``` + +**config interface transceiver reset** + +This command is used to reset an SFP transceiver + +- Usage: + + ``` + config interface transceiver reset + ``` + +- Examples: + + ``` + user@sonic~$ sudo config interface transceiver reset Ethernet0 + Resetting port Ethernet0... OK ``` **config interface mtu (Versions >= 201904)** From fbe3750a70f31ca06da0af0eca8d1a8758802e82 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sat, 9 May 2020 14:46:39 -0700 Subject: [PATCH 064/111] Remove dependency on click-default-group package (#903) --- clear/bgp_frr_v6.py | 43 ++++++++++--------------- clear/bgp_quagga_v4.py | 20 ++++-------- clear/bgp_quagga_v6.py | 22 ++++--------- clear/main.py | 20 +++++------- config/main.py | 18 +++-------- connect/main.py | 17 +++------- debug/main.py | 19 +++++------ setup.py | 1 - show/main.py | 71 +++++++++++++++++++----------------------- undebug/main.py | 33 +++++++++----------- 10 files changed, 98 insertions(+), 166 deletions(-) diff --git a/clear/bgp_frr_v6.py b/clear/bgp_frr_v6.py index 58c50e28d9..1ebb6c92cd 100644 --- a/clear/bgp_frr_v6.py +++ b/clear/bgp_frr_v6.py @@ -14,33 +14,23 @@ def bgp(): """Clear IPv6 BGP (Border Gateway Protocol) information""" pass - -# Default 'bgp' command (called if no subcommands or their aliases were passed) -@bgp.command(default=True) -def default(): - """Clear all BGP peers""" - command = 'sudo vtysh -c "clear bgp ipv6 *"' - run_command(command) - - @bgp.group() def neighbor(): """Clear specific BGP peers""" pass - -@neighbor.command(default=True) +# 'all' subcommand +@neighbor.command('all') @click.argument('ipaddress', required=False) -def default(ipaddress): +def neigh_all(ipaddress): """Clear all BGP peers""" if ipaddress is not None: - command = 'sudo vtysh -c "clear bgp ipv6 {} "'.format(ipaddress) + command = 'sudo vtysh -c "clear bgp ipv6 {}"'.format(ipaddress) else: command = 'sudo vtysh -c "clear bgp ipv6 *"' run_command(command) - # 'in' subcommand @neighbor.command('in') 
@click.argument('ipaddress', required=False) @@ -72,19 +62,6 @@ def soft(): """Soft reconfig BGP's inbound/outbound updates""" pass - -@soft.command(default=True) -@click.argument('ipaddress', required=False) -def default(ipaddress): - """Clear BGP neighbors soft configuration""" - - if ipaddress is not None: - command = 'sudo vtysh -c "clear bgp ipv6 {} soft "'.format(ipaddress) - else: - command = 'sudo vtysh -c "clear bgp ipv6 * soft"' - run_command(command) - - # 'soft in' subcommand @soft.command('in') @click.argument('ipaddress', required=False) @@ -98,6 +75,18 @@ def soft_in(ipaddress): run_command(command) +# 'soft all' subcommand +@neighbor.command('all') +@click.argument('ipaddress', required=False) +def soft_all(ipaddress): + """Clear BGP neighbors soft configuration""" + + if ipaddress is not None: + command = 'sudo vtysh -c "clear bgp ipv6 {} soft"'.format(ipaddress) + else: + command = 'sudo vtysh -c "clear bgp ipv6 * soft"' + run_command(command) + # 'soft out' subcommand @soft.command('out') @click.argument('ipaddress', required=False) diff --git a/clear/bgp_quagga_v4.py b/clear/bgp_quagga_v4.py index 56675953ca..4ebc5a1f95 100644 --- a/clear/bgp_quagga_v4.py +++ b/clear/bgp_quagga_v4.py @@ -15,27 +15,19 @@ def bgp(): pass -# Default 'bgp' command (called if no subcommands or their aliases were passed) -@bgp.command(default=True) -def default(): - """Clear all BGP peers""" - command = 'sudo vtysh -c "clear ip bgp *"' - run_command(command) - - @bgp.group() def neighbor(): """Clear specific BGP peers""" pass -@neighbor.command(default=True) +@neighbor.command('all') @click.argument('ipaddress', required=False) -def default(ipaddress): +def neigh_all(ipaddress): """Clear all BGP peers""" if ipaddress is not None: - command = 'sudo vtysh -c "clear ip bgp {} "'.format(ipaddress) + command = 'sudo vtysh -c "clear ip bgp {}"'.format(ipaddress) else: command = 'sudo vtysh -c "clear ip bgp *"' run_command(command) @@ -73,13 +65,13 @@ def soft(): pass 
-@soft.command(default=True) +@soft.command('all') @click.argument('ipaddress', required=False) -def default(ipaddress): +def soft_all(ipaddress): """Clear BGP neighbors soft configuration""" if ipaddress is not None: - command = 'sudo vtysh -c "clear ip bgp {} soft "'.format(ipaddress) + command = 'sudo vtysh -c "clear ip bgp {} soft"'.format(ipaddress) else: command = 'sudo vtysh -c "clear ip bgp * soft"' run_command(command) diff --git a/clear/bgp_quagga_v6.py b/clear/bgp_quagga_v6.py index ad6758d2af..fcfe3ed1fb 100644 --- a/clear/bgp_quagga_v6.py +++ b/clear/bgp_quagga_v6.py @@ -14,28 +14,18 @@ def bgp(): """Clear IPv6 BGP (Border Gateway Protocol) information""" pass - -# Default 'bgp' command (called if no subcommands or their aliases were passed) -@bgp.command(default=True) -def default(): - """Clear all BGP peers""" - command = 'sudo vtysh -c "clear ipv6 bgp *"' - run_command(command) - - @bgp.group() def neighbor(): """Clear specific BGP peers""" pass - -@neighbor.command(default=True) +@neighbor.command('all') @click.argument('ipaddress', required=False) -def default(ipaddress): +def neigh_all(ipaddress): """Clear all BGP peers""" if ipaddress is not None: - command = 'sudo vtysh -c "clear ipv6 bgp {} "'.format(ipaddress) + command = 'sudo vtysh -c "clear ipv6 bgp {}"'.format(ipaddress) else: command = 'sudo vtysh -c "clear ipv6 bgp *"' run_command(command) @@ -73,13 +63,13 @@ def soft(): pass -@soft.command(default=True) +@soft.command('all') @click.argument('ipaddress', required=False) -def default(ipaddress): +def soft_all(ipaddress): """Clear BGP neighbors soft configuration""" if ipaddress is not None: - command = 'sudo vtysh -c "clear ipv6 bgp {} soft "'.format(ipaddress) + command = 'sudo vtysh -c "clear ipv6 bgp {} soft"'.format(ipaddress) else: command = 'sudo vtysh -c "clear ipv6 bgp * soft"' run_command(command) diff --git a/clear/main.py b/clear/main.py index 669c87567a..eec3a62b8f 100755 --- a/clear/main.py +++ b/clear/main.py @@ -3,7 +3,6 
@@ import click import os import subprocess -from click_default_group import DefaultGroup try: # noinspection PyPep8Naming @@ -35,12 +34,10 @@ def read_config(self, filename): _config = None -# This aliased group has been modified from click examples to inherit from DefaultGroup instead of click.Group. -# DefaultFroup is a superclass of click.Group which calls a default subcommand instead of showing -# a help message if no subcommand is passed -class AliasedGroup(DefaultGroup): - """This subclass of a DefaultGroup supports looking up aliases in a config - file and with a bit of magic. + +class AliasedGroup(click.Group): + """This subclass of click.Group supports abbreviations and + looking up aliases in a config file with a bit of magic. """ def get_command(self, ctx, cmd_name): @@ -71,12 +68,9 @@ def get_command(self, ctx, cmd_name): matches = [x for x in self.list_commands(ctx) if x.lower().startswith(cmd_name.lower())] if not matches: - # No command name matched. Issue Default command. 
- ctx.arg0 = cmd_name - cmd_name = self.default_cmd_name - return DefaultGroup.get_command(self, ctx, cmd_name) + return None elif len(matches) == 1: - return DefaultGroup.get_command(self, ctx, matches[0]) + return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) @@ -384,7 +378,7 @@ def line(linenum): # 'nat' group ("clear nat ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def nat(): """Clear the nat info""" pass diff --git a/config/main.py b/config/main.py index 709688cdf9..c3c9b6a62d 100755 --- a/config/main.py +++ b/config/main.py @@ -14,7 +14,6 @@ import ipaddress from swsssdk import ConfigDBConnector, SonicV2Connector, SonicDBConfig from minigraph import parse_device_desc_xml -from click_default_group import DefaultGroup import aaa import mlnx @@ -63,12 +62,8 @@ def log_error(msg): syslog.closelog() -# This aliased group has been modified from click examples to inherit from DefaultGroup instead of click.Group. -# DefaultGroup is a superclass of click.Group which calls a default subcommand instead of showing -# a help message if no subcommand is passed -class AbbreviationGroup(DefaultGroup): - """This subclass of a DefaultGroup supports looking up aliases in a config - file and with a bit of magic. +class AbbreviationGroup(click.Group): + """This subclass of click.Group supports abbreviated subgroup/subcommand names """ def get_command(self, ctx, cmd_name): @@ -93,18 +88,15 @@ def get_command(self, ctx, cmd_name): shortest = x if not matches: - # No command name matched. Issue Default command. 
- ctx.arg0 = cmd_name - cmd_name = self.default_cmd_name - return DefaultGroup.get_command(self, ctx, cmd_name) + return None elif len(matches) == 1: - return DefaultGroup.get_command(self, ctx, matches[0]) + return click.Group.get_command(self, ctx, matches[0]) else: for x in matches: if not x.startswith(shortest): break else: - return DefaultGroup.get_command(self, ctx, shortest) + return click.Group.get_command(self, ctx, shortest) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) diff --git a/connect/main.py b/connect/main.py index b34ee11aab..ef40af3bbb 100755 --- a/connect/main.py +++ b/connect/main.py @@ -3,7 +3,6 @@ import click import os import pexpect -from click_default_group import DefaultGroup try: # noinspection PyPep8Naming @@ -35,12 +34,9 @@ def read_config(self, filename): _config = None -# This aliased group has been modified from click examples to inherit from DefaultGroup instead of click.Group. -# DefaultGroup is a superclass of click.Group which calls a default subcommand instead of showing -# a help message if no subcommand is passed -class AliasedGroup(DefaultGroup): - """This subclass of a DefaultGroup supports looking up aliases in a config - file and with a bit of magic. +class AliasedGroup(click.Group): + """This subclass of click.Group supports abbreviations and + looking up aliases in a config file with a bit of magic. """ def get_command(self, ctx, cmd_name): @@ -71,12 +67,9 @@ def get_command(self, ctx, cmd_name): matches = [x for x in self.list_commands(ctx) if x.lower().startswith(cmd_name.lower())] if not matches: - # No command name matched. Issue Default command. 
- ctx.arg0 = cmd_name - cmd_name = self.default_cmd_name - return DefaultGroup.get_command(self, ctx, cmd_name) + return None elif len(matches) == 1: - return DefaultGroup.get_command(self, ctx, matches[0]) + return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) def run_command(command, display_cmd=False): diff --git a/debug/main.py b/debug/main.py index b72394b22d..cbcca05064 100755 --- a/debug/main.py +++ b/debug/main.py @@ -3,7 +3,6 @@ import click import subprocess -from click_default_group import DefaultGroup def run_command(command, pager=False): click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) @@ -192,17 +191,13 @@ def vxlan(): # # 'bgp' group for quagga ### # - @cli.group(cls=DefaultGroup, default_if_no_args=True) - #@cli.group() - def bgp(): - """debug bgp on """ - pass - - @bgp.command(default=True) - def default(): - """debug bgp""" - command = 'sudo vtysh -c "debug bgp"' - run_command(command) + @cli.group(invoke_without_command=True) + @click.pass_context + def bgp(ctx): + """debug bgp on""" + if ctx.invoked_subcommand is None: + command = 'sudo vtysh -c "debug bgp"' + run_command(command) @bgp.command() def events(): diff --git a/setup.py b/setup.py index 2924566c7d..a1fece9118 100644 --- a/setup.py +++ b/setup.py @@ -144,7 +144,6 @@ # - swsssdk # - tabulate install_requires=[ - 'click-default-group', 'click', 'natsort' ], diff --git a/show/main.py b/show/main.py index 05d0e37864..5c19ce6167 100755 --- a/show/main.py +++ b/show/main.py @@ -11,7 +11,6 @@ from pkg_resources import parse_version import click -from click_default_group import DefaultGroup from natsort import natsorted from tabulate import tabulate @@ -119,12 +118,9 @@ def alias_to_name(self, interface_alias): _config = None -# This aliased group has been modified from click examples to inherit from DefaultGroup instead of click.Group. 
-# DefaultGroup is a superclass of click.Group which calls a default subcommand instead of showing -# a help message if no subcommand is passed -class AliasedGroup(DefaultGroup): - """This subclass of a DefaultGroup supports looking up aliases in a config - file and with a bit of magic. +class AliasedGroup(click.Group): + """This subclass of click.Group supports abbreviations and + looking up aliases in a config file with a bit of magic. """ def get_command(self, ctx, cmd_name): @@ -155,12 +151,9 @@ def get_command(self, ctx, cmd_name): matches = [x for x in self.list_commands(ctx) if x.lower().startswith(cmd_name.lower())] if not matches: - # No command name matched. Issue Default command. - ctx.arg0 = cmd_name - cmd_name = self.default_cmd_name - return DefaultGroup.get_command(self, ctx, cmd_name) + return None elif len(matches) == 1: - return DefaultGroup.get_command(self, ctx, matches[0]) + return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) @@ -685,7 +678,7 @@ def mgmt_vrf(ctx,routes): # 'management_interface' group ("show management_interface ...") # -@cli.group(name='management_interface', cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='management_interface', cls=AliasedGroup) def management_interface(): """Show management interface parameters""" pass @@ -751,7 +744,7 @@ def snmptrap (ctx): # 'interfaces' group ("show interfaces ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def interfaces(): """Show details of the network interfaces""" pass @@ -796,7 +789,7 @@ def alias(interfacename): # # 'neighbor' group ### # -@interfaces.group(cls=AliasedGroup, default_if_no_args=False) +@interfaces.group(cls=AliasedGroup) def neighbor(): """Show neighbor related information""" pass @@ -853,7 +846,7 @@ def expected(interfacename): click.echo(tabulate(body, header)) -@interfaces.group(cls=AliasedGroup, default_if_no_args=False) 
+@interfaces.group(cls=AliasedGroup) def transceiver(): """Show SFP Transceiver information""" pass @@ -1016,7 +1009,7 @@ def portchannel(verbose): # 'subinterfaces' group ("show subinterfaces ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def subinterfaces(): """Show details of the sub port interfaces""" pass @@ -1047,7 +1040,7 @@ def status(subinterfacename, verbose): # 'pfc' group ("show pfc ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def pfc(): """Show details of the priority-flow-control (pfc) """ pass @@ -1089,7 +1082,7 @@ def asymmetric(interface): run_command(cmd) # 'pfcwd' subcommand ("show pfcwd...") -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def pfcwd(): """Show details of the pfc watchdog """ pass @@ -1125,7 +1118,7 @@ def naming_mode(verbose): # 'watermark' group ("show watermark telemetry interval") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def watermark(): """Show details of watermark """ pass @@ -1146,7 +1139,7 @@ def show_tm_interval(): # 'queue' group ("show queue ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def queue(): """Show details of the queues """ pass @@ -1220,7 +1213,7 @@ def pwm_q_multi(): # 'priority-group' group ("show priority-group ...") # -@cli.group(name='priority-group', cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='priority-group', cls=AliasedGroup) def priority_group(): """Show details of the PGs """ @@ -1263,7 +1256,7 @@ def pwm_pg_shared(): # 'buffer_pool' group ("show buffer_pool ...") # -@cli.group(name='buffer_pool', cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='buffer_pool', cls=AliasedGroup) def buffer_pool(): """Show details of the buffer pools""" @@ -1321,7 +1314,7 @@ def route_map(route_map_name, verbose): # # This group houses IP (i.e., IPv4) commands and 
subgroups -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def ip(): """Show IP (IPv4) commands""" pass @@ -1504,7 +1497,7 @@ def protocol(verbose): # # This group houses IPv6-related commands and subgroups -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def ipv6(): """Show IPv6 commands""" pass @@ -1622,7 +1615,7 @@ def protocol(verbose): # 'lldp' group ("show lldp ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def lldp(): """LLDP (Link Layer Discovery Protocol) information""" pass @@ -1676,7 +1669,7 @@ def get_hw_info_dict(): hw_info_dict['asic_type'] = asic_type return hw_info_dict -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def platform(): """Show platform-specific hardware info""" pass @@ -1825,7 +1818,7 @@ def environment(verbose): # 'processes' group ("show processes ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def processes(): """Display process information""" pass @@ -1888,7 +1881,7 @@ def techsupport(since, verbose): # 'runningconfiguration' group ("show runningconfiguration") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def runningconfiguration(): """Show current running configuration information""" pass @@ -2002,7 +1995,7 @@ def syslog(verbose): # 'startupconfiguration' group ("show startupconfiguration ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def startupconfiguration(): """Show startup configuration information""" pass @@ -2074,7 +2067,7 @@ def system_memory(verbose): cmd = "free -m" run_command(cmd, display_cmd=verbose) -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def vlan(): """Show VLAN information""" pass @@ -2082,7 +2075,7 @@ def vlan(): # # 'kdump command ("show kdump ...") # 
-@cli.group(cls=AliasedGroup, default_if_no_args=True, ) +@cli.group(cls=AliasedGroup) def kdump(): """Show kdump configuration, status and information """ pass @@ -2105,7 +2098,7 @@ def enabled(): else: click.echo("kdump is disabled") -@kdump.command('status', default=True) +@kdump.command('status') def status(): """Show kdump status""" run_command("sonic-kdump-config --status") @@ -2478,7 +2471,7 @@ def show_sflow_global(config_db): # 'acl' group ### # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def acl(): """Show ACL related information""" pass @@ -2520,7 +2513,7 @@ def table(table_name, verbose): # 'dropcounters' group ### # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def dropcounters(): """Show drop counter related information""" pass @@ -2631,7 +2624,7 @@ def line(): return -@cli.group(name='warm_restart', cls=AliasedGroup, default_if_no_args=False) +@cli.group(name='warm_restart', cls=AliasedGroup) def warm_restart(): """Show warm restart configuration and state""" pass @@ -2757,7 +2750,7 @@ def tablelize(keys, data, enable_table_keys, prefix): # 'nat' group ("show nat ...") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def nat(): """Show details of the nat """ pass @@ -2928,7 +2921,7 @@ def autorestart(container_name): # # 'vnet' command ("show vnet") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def vnet(): """Show vnet related information""" pass @@ -3153,7 +3146,7 @@ def tunnel(): # # 'vxlan' command ("show vxlan") # -@cli.group(cls=AliasedGroup, default_if_no_args=False) +@cli.group(cls=AliasedGroup) def vxlan(): """Show vxlan related information""" pass diff --git a/undebug/main.py b/undebug/main.py index a148445f73..313c551cb6 100644 --- a/undebug/main.py +++ b/undebug/main.py @@ -3,7 +3,6 @@ import click import subprocess -from click_default_group import DefaultGroup def 
run_command(command, pager=False): click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) @@ -192,57 +191,53 @@ def vxlan(): # # 'bgp' group for quagga ### # - @cli.group(cls=DefaultGroup, default_if_no_args=True) - #@cli.group() - def bgp(): - """debug bgp on """ - pass - - @bgp.command(default=True) - def default(): - """debug bgp""" - command = 'sudo vtysh -c "no debug bgp"' - run_command(command) + @cli.group(invoke_without_command=True) + @click.pass_context + def bgp(ctx): + """debug bgp off""" + if ctx.invoked_subcommand is None: + command = 'sudo vtysh -c "no debug bgp"' + run_command(command) @bgp.command() def events(): - """debug bgp events on""" + """debug bgp events off""" command = 'sudo vtysh -c "no debug bgp events"' run_command(command) @bgp.command() def updates(): - """debug bgp updates on""" + """debug bgp updates off""" command = 'sudo vtysh -c "no debug bgp updates"' run_command(command) @bgp.command() def as4(): - """debug bgp as4 actions on""" + """debug bgp as4 actions off""" command = 'sudo vtysh -c "no debug bgp as4"' run_command(command) @bgp.command() def filters(): - """debug bgp filters on""" + """debug bgp filters off""" command = 'sudo vtysh -c "no debug bgp filters"' run_command(command) @bgp.command() def fsm(): - """debug bgp finite state machine on""" + """debug bgp finite state machine off""" command = 'sudo vtysh -c "no debug bgp fsm"' run_command(command) @bgp.command() def keepalives(): - """debug bgp keepalives on""" + """debug bgp keepalives off""" command = 'sudo vtysh -c "no debug bgp keepalives"' run_command(command) @bgp.command() def zebra(): - """debug bgp zebra messages on""" + """debug bgp zebra messages off""" command = 'sudo vtysh -c "no debug bgp zebra"' run_command(command) From 30a26362a1be79853477b12463b861cd81665df7 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sat, 9 May 2020 17:38:00 -0700 Subject: [PATCH 065/111] [config] Log invocation of config commands to syslog (#259) 
--- config/main.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/config/main.py b/config/main.py index c3c9b6a62d..45042d2647 100755 --- a/config/main.py +++ b/config/main.py @@ -680,6 +680,7 @@ def save(filename): else: command = "{} -n {} -d --print-data > {}".format(SONIC_CFGGEN_PATH, namespace, file) + log_info("'save' executing...") run_command(command, display_cmd=True) @config.command() @@ -741,6 +742,7 @@ def load(filename, yes): else: command = "{} -n {} -j {} --write-to-db".format(SONIC_CFGGEN_PATH, namespace, file) + log_info("'load' executing...") run_command(command, display_cmd=True) @@ -871,6 +873,7 @@ def reload(filename, yes, load_sysinfo): @click.argument('filename', default='/etc/sonic/device_desc.xml', type=click.Path(exists=True)) def load_mgmt_config(filename): """Reconfigure hostname and mgmt interface based on device description file.""" + log_info("'load_mgmt_config' executing...") command = "{} -M {} --write-to-db".format(SONIC_CFGGEN_PATH, filename) run_command(command, display_cmd=True) #FIXME: After config DB daemon for hostname and mgmt interface is implemented, we'll no longer need to do manual configuration here @@ -1182,14 +1185,19 @@ def start_default(verbose): @config.group(cls=AbbreviationGroup) @click.pass_context def qos(ctx): + """QoS-related configuration tasks""" pass @qos.command('clear') def clear(): + """Clear QoS configuration""" + log_info("'qos clear' executing...") _clear_qos() @qos.command('reload') def reload(): + """Reload QoS configuration""" + log_info("'qos reload' executing...") _clear_qos() platform = _get_platform() hwsku = _get_hwsku() @@ -1321,6 +1329,8 @@ def add_vlan(ctx, vid): @click.argument('vid', metavar='', required=True, type=int) @click.pass_context def del_vlan(ctx, vid): + """Delete VLAN""" + log_info("'vlan del {}' executing...".format(vid)) db = ctx.obj['db'] keys = [ (k, v) for k, v in db.get_table('VLAN_MEMBER') if k == 'Vlan{}'.format(vid) ] for k in keys: @@ 
-1343,6 +1353,8 @@ def vlan_member(ctx): @click.option('-u', '--untagged', is_flag=True) @click.pass_context def add_vlan_member(ctx, vid, interface_name, untagged): + """Add VLAN member""" + log_info("'vlan member add {} {}' executing...".format(vid, interface_name)) db = ctx.obj['db'] vlan_name = 'Vlan{}'.format(vid) vlan = db.get_entry('VLAN', vlan_name) @@ -1381,6 +1393,8 @@ def add_vlan_member(ctx, vid, interface_name, untagged): @click.argument('interface_name', metavar='', required=True) @click.pass_context def del_vlan_member(ctx, vid, interface_name): + """Delete VLAN member""" + log_info("'vlan member del {} {}' executing...".format(vid, interface_name)) db = ctx.obj['db'] vlan_name = 'Vlan{}'.format(vid) vlan = db.get_entry('VLAN', vlan_name) @@ -1674,6 +1688,7 @@ def num_dumps(kdump_num_dumps): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Shut down all BGP sessions""" + log_info("'bgp shutdown all' executing...") bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() for ipaddress in bgp_neighbor_ip_list: _change_bgp_session_status_by_addr(ipaddress, 'down', verbose) @@ -1684,6 +1699,7 @@ def all(verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): """Shut down BGP session by neighbor IP address or hostname""" + log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname)) _change_bgp_session_status(ipaddr_or_hostname, 'down', verbose) @bgp.group(cls=AbbreviationGroup) @@ -1696,6 +1712,7 @@ def startup(): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): """Start up all BGP sessions""" + log_info("'bgp startup all' executing...") bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() for ipaddress in bgp_neighbor_ip_list: _change_bgp_session_status(ipaddress, 'up', verbose) @@ -1706,6 +1723,7 @@ def all(verbose): @click.option('-v', '--verbose', is_flag=True, 
help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): """Start up BGP session by neighbor IP address or hostname""" + log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname)) _change_bgp_session_status(ipaddr_or_hostname, 'up', verbose) # @@ -1745,6 +1763,7 @@ def interface(ctx): @click.pass_context def startup(ctx, interface_name): """Start up interface""" + config_db = ctx.obj['config_db'] if get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(interface_name) @@ -1754,6 +1773,8 @@ def startup(ctx, interface_name): if interface_name_is_valid(interface_name) is False: ctx.fail("Interface name is invalid. Please enter a valid interface name!!") + log_info("'interface startup {}' executing...".format(interface_name)) + if interface_name.startswith("Ethernet"): if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: config_db.mod_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "up"}) @@ -1773,6 +1794,7 @@ def startup(ctx, interface_name): @click.pass_context def shutdown(ctx, interface_name): """Shut down interface""" + log_info("'interface shutdown {}' executing...".format(interface_name)) config_db = ctx.obj['config_db'] if get_interface_naming_mode() == "alias": interface_name = interface_alias_to_name(interface_name) @@ -1809,6 +1831,8 @@ def speed(ctx, interface_name, interface_speed, verbose): if interface_name is None: ctx.fail("'interface_name' is None!") + log_info("'interface speed {} {}' executing...".format(interface_name, interface_speed)) + command = "portconfig -p {} -s {}".format(interface_name, interface_speed) if verbose: command += " -vv" @@ -2361,6 +2385,7 @@ def update(): @click.argument('file_name', required=True) def full(file_name): """Full update of ACL rules configuration.""" + log_info("'acl update full {}' executing...".format(file_name)) command = "acl-loader update full {}".format(file_name) run_command(command) @@ -2373,6 +2398,7 @@ def full(file_name): 
@click.argument('file_name', required=True) def incremental(file_name): """Incremental update of ACL rule configuration.""" + log_info("'acl update incremental {}' executing...".format(file_name)) command = "acl-loader update incremental {}".format(file_name) run_command(command) @@ -2463,6 +2489,7 @@ def remove_reasons(counter_name, reasons, verbose): @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, verbose): """ECN-related configuration tasks""" + log_info("'ecn -profile {}' executing...".format(profile)) command = "ecnconfig -p %s" % profile if rmax is not None: command += " -rmax %d" % rmax if rmin is not None: command += " -rmin %d" % rmin From e5c7c17c2f19487f464ef3768108d71688548ee5 Mon Sep 17 00:00:00 2001 From: yangshiping <43633727+yangshp1987@users.noreply.github.com> Date: Mon, 11 May 2020 23:51:39 +0800 Subject: [PATCH 066/111] [show] Add `ntpstat` output to `show ntp` (#861) Signed-off-by: yangshiping --- doc/Command-Reference.md | 5 +++++ show/main.py | 10 ++++++---- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index cdee6a6d0a..384c1f357e 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3997,12 +3997,17 @@ This command displays a list of NTP peers known to the server as well as a summa - Example: ``` admin@sonic:~$ show ntp + synchronised to NTP server (204.2.134.164) at stratum 3 + time correct to within 326797 ms + polling server every 1024 s + remote refid st t when poll reach delay offset jitter ============================================================================== 23.92.29.245 .XFAC. 16 u - 1024 0 0.000 0.000 0.000 *204.2.134.164 46.233.231.73 2 u 916 1024 377 3.079 0.394 0.128 ``` + ### NTP Config Commands This sub-section of commands is used to add or remove the configured NTP servers. 
diff --git a/show/main.py b/show/main.py index 5c19ce6167..363895fce1 100755 --- a/show/main.py +++ b/show/main.py @@ -2028,20 +2028,22 @@ def bgp(verbose): @click.option('--verbose', is_flag=True, help="Enable verbose output") def ntp(ctx, verbose): """Show NTP information""" + ntpstat_cmd = "ntpstat" ntpcmd = "ntpq -p -n" if is_mgmt_vrf_enabled(ctx) is True: #ManagementVRF is enabled. Call ntpq using "ip vrf exec" or cgexec based on linux version os_info = os.uname() release = os_info[2].split('-') if parse_version(release[0]) > parse_version("4.9.0"): - ntpcmd = "ip vrf exec mgmt ntpq -p -n" + ntpstat_cmd = "sudo ip vrf exec mgmt ntpstat" + ntpcmd = "sudo ip vrf exec mgmt ntpq -p -n" else: - ntpcmd = "cgexec -g l3mdev:mgmt ntpq -p -n" + ntpstat_cmd = "sudo cgexec -g l3mdev:mgmt ntpstat" + ntpcmd = "sudo cgexec -g l3mdev:mgmt ntpq -p -n" + run_command(ntpstat_cmd, display_cmd=verbose) run_command(ntpcmd, display_cmd=verbose) - - # # 'uptime' command ("show uptime") # From 96e5abd9c995651cbda35233310f91a4afb8d0c3 Mon Sep 17 00:00:00 2001 From: Dong Zhang <41927498+dzhangalibaba@users.noreply.github.com> Date: Wed, 13 May 2020 09:34:57 -0700 Subject: [PATCH 067/111] [MultiDB] use sonic-db-cli instead of redis-cli in new added codes (#907) --- scripts/fast-reboot | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index ec0ccae1b7..0478b3af1e 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -107,7 +107,7 @@ function clear_fast_boot() { common_clear - redis-cli -n 6 DEL "FAST_REBOOT|system" &>/dev/null || /bin/true + sonic-db-cli STATE_DB DEL "FAST_REBOOT|system" &>/dev/null || /bin/true } function clear_warm_boot() @@ -208,7 +208,8 @@ function backup_database() end end " 0 > /dev/null - redis-cli save > /dev/null + sonic-db-cli SAVE > /dev/null + #TODO : need a script to copy all rdb files if there is multiple db instances config docker cp database:/var/lib/redis/$REDIS_FILE $WARM_DIR docker 
exec -i database rm /var/lib/redis/$REDIS_FILE } @@ -317,7 +318,7 @@ case "$REBOOT_TYPE" in "fast-reboot") BOOT_TYPE_ARG=$REBOOT_TYPE trap clear_fast_boot EXIT HUP INT QUIT TERM KILL ABRT ALRM - redis-cli -n 6 SET "FAST_REBOOT|system" "1" "EX" "180" &>/dev/null + sonic-db-cli STATE_DB SET "FAST_REBOOT|system" "1" "EX" "180" &>/dev/null ;; "warm-reboot") if [[ "$sonic_asic_type" == "mellanox" ]]; then From a2c4c30b548117a8309087ee5d7a452612fb0c27 Mon Sep 17 00:00:00 2001 From: Junchao-Mellanox <57339448+Junchao-Mellanox@users.noreply.github.com> Date: Thu, 14 May 2020 01:06:42 +0800 Subject: [PATCH 068/111] [fanshow] Add column drawer name and led status to output of show platform fan (#881) --- scripts/fanshow | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/fanshow b/scripts/fanshow index ea6ff3dbd5..7e1125938d 100644 --- a/scripts/fanshow +++ b/scripts/fanshow @@ -9,13 +9,15 @@ from swsssdk import SonicV2Connector from natsort import natsorted -header = ['FAN', 'Speed', 'Direction', 'Presence', 'Status', 'Timestamp'] +header = ['Drawer', 'LED', 'FAN', 'Speed', 'Direction', 'Presence', 'Status', 'Timestamp'] FAN_TABLE_NAME = 'FAN_INFO' +DRAWER_FIELD_NAME = 'drawer_name' SPEED_FIELD_NAME = 'speed' DIRECTION_FIELD_NAME = 'direction' PRESENCE_FIELD_NAME = 'presence' STATUS_FIELD_NAME = 'status' +LED_STATUS_FIELD_NAME = 'led_status' TIMESTAMP_FIELD_NAME = 'timestamp' @@ -58,7 +60,8 @@ class FanShow(object): else: status = 'N/A' - table.append((name, speed, data_dict[DIRECTION_FIELD_NAME], presence, status, data_dict[TIMESTAMP_FIELD_NAME])) + table.append((data_dict[DRAWER_FIELD_NAME], data_dict[LED_STATUS_FIELD_NAME], name, speed, data_dict[DIRECTION_FIELD_NAME], presence, status, + data_dict[TIMESTAMP_FIELD_NAME])) if table: print(tabulate(table, header, tablefmt='simple', stralign='right')) From bae3f057d9b28ba28ba2e0a125ce78a12fde954c Mon Sep 17 00:00:00 2001 From: Sumukha Tumkur Vani Date: Wed, 13 May 2020 10:27:56 -0700 Subject: 
[PATCH 069/111] Stop/Start restapi server upon config reload (#911) --- config/main.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 45042d2647..063f94e518 100755 --- a/config/main.py +++ b/config/main.py @@ -545,6 +545,7 @@ def _abort_if_false(ctx, param, value): def _stop_services(): # on Mellanox platform pmon is stopped by syncd services_to_stop = [ + 'restapi', 'swss', 'lldp', 'pmon', @@ -575,7 +576,8 @@ def _reset_failed_services(): 'syncd', 'teamd', 'nat', - 'sflow' + 'sflow', + 'restapi' ] execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED) @@ -595,6 +597,7 @@ def _restart_services(): 'hostcfgd', 'nat', 'sflow', + 'restapi' ] if asic_type == 'mellanox' and 'pmon' in services_to_restart: From e8904d2aae744f5e0d2095bdf9fef1afb0bfac03 Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Wed, 13 May 2020 16:32:26 -0700 Subject: [PATCH 070/111] Changes to support acl-loader and mirror-session config commands for multi-npu platforms. (#908) * Changes to support acl-loader command for multi-npu platforms. Move multi-npu related utility functions from config/main.py to sonic-device-util.py so that it can be used by acl-loader/any other module. Updated Mirror Session add/remove for multi-npu platforms. Needed for Everflow ACL rule programming. 
* Address review comment to change comment from """ to # except for doc string --- acl_loader/main.py | 120 ++++++++++++++++++++++++++++++++++++++------- config/main.py | 117 +++++++++++++++---------------------------- 2 files changed, 143 insertions(+), 94 deletions(-) diff --git a/acl_loader/main.py b/acl_loader/main.py index 362f1f75ea..f9201846b3 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -6,11 +6,13 @@ import syslog import tabulate from natsort import natsorted +import sonic_device_util import openconfig_acl import pyangbind.lib.pybindJSON as pybindJSON from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector +from swsssdk import SonicDBConfig def info(msg): @@ -114,12 +116,39 @@ def __init__(self): self.tables_db_info = {} self.rules_db_info = {} self.rules_info = {} + # Load global db config. This call is no-op in single npu platforms + SonicDBConfig.load_sonic_global_db_config() self.sessions_db_info = {} self.configdb = ConfigDBConnector() self.configdb.connect() self.statedb = SonicV2Connector(host="127.0.0.1") self.statedb.connect(self.statedb.STATE_DB) + # For multi-npu architecture we will have both global and per front asic namespace. + # Global namespace will be used for Control plane ACL which are via IPTables. + # Per ASIC namespace will be used for Data and Everflow ACL's. + # Global Configdb will have all ACL information for both Ctrl and Data/Evereflow ACL's + # and will be used as souurce of truth for ACL modification to config DB which will be done to both Global DB and + # front asic namespace + + self.per_npu_configdb = {} + + # State DB are used for to get mirror Session monitor port. + # For multi-npu platforms each asic namespace can have different monitor port + # dependinding on which route to session destination ip. 
So for multi-npu + # platforms we get state db for all front asic namespace in addition to + + self.per_npu_statedb = {} + + # Getting all front asic namespace and correspding config and state DB connector + + namespaces = sonic_device_util.get_all_namespaces() + for front_asic_namespaces in namespaces['front_ns']: + self.per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + self.per_npu_configdb[front_asic_namespaces].connect() + self.per_npu_statedb[front_asic_namespaces] = SonicV2Connector(use_unix_socket_path=True, namespace=front_asic_namespaces) + self.per_npu_statedb[front_asic_namespaces].connect(self.per_npu_statedb[front_asic_namespaces].STATE_DB) + self.read_tables_info() self.read_rules_info() self.read_sessions_info() @@ -150,7 +179,14 @@ def read_policers_info(self): Read POLICER table from configuration database :return: """ - self.policers_db_info = self.configdb.get_table(self.POLICER) + + # For multi-npu platforms we will read from any one of front asic namespace + # config db as the information should be same across all config db + if self.per_npu_configdb: + namespace_configdb = (self.per_npu_configdb.values())[0] + self.policers_db_info = namespace_configdb.get_table(self.POLICER) + else: + self.policers_db_info = self.configdb.get_table(self.POLICER) def get_policers_db_info(self): return self.policers_db_info @@ -160,17 +196,30 @@ def read_sessions_info(self): Read MIRROR_SESSION table from configuration database :return: """ - self.sessions_db_info = self.configdb.get_table(self.CFG_MIRROR_SESSION_TABLE) + + # For multi-npu platforms we will read from any one of front asic namespace + # config db as the information should be same across all config db + if self.per_npu_configdb: + namespace_configdb = (self.per_npu_configdb.values())[0] + self.sessions_db_info = namespace_configdb.get_table(self.CFG_MIRROR_SESSION_TABLE) + else: + self.sessions_db_info = 
self.configdb.get_table(self.CFG_MIRROR_SESSION_TABLE) for key in self.sessions_db_info.keys(): - state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.STATE_MIRROR_SESSION_TABLE, key)) - monitor_port = "" - if state_db_info: - status = state_db_info.get("status", "inactive") - monitor_port = state_db_info.get("monitor_port", "") + if self.per_npu_statedb: + # For multi-npu platforms we will read from all front asic name space + # statedb as the monitor port will be differnt for each asic + # and it's status also might be different (ideally should not happen) + # We will store them as dict of 'asic' : value + self.sessions_db_info[key]["status"] = {} + self.sessions_db_info[key]["monitor_port"] = {} + for namespace_key, namespace_statedb in self.per_npu_statedb.iteritems(): + state_db_info = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.STATE_MIRROR_SESSION_TABLE, key)) + self.sessions_db_info[key]["status"][namespace_key] = state_db_info.get("status", "inactive") if state_db_info else "error" + self.sessions_db_info[key]["monitor_port"][namespace_key] = state_db_info.get("monitor_port", "") if state_db_info else "" else: - status = "error" - self.sessions_db_info[key]["status"] = status - self.sessions_db_info[key]["monitor_port"] = monitor_port + state_db_info = self.statedb.get_all(self.statedb.STATE_DB, "{}|{}".format(self.STATE_MIRROR_SESSION_TABLE, key)) + self.sessions_db_info[key]["status"] = state_db_info.get("status", "inactive") if state_db_info else "error" + self.sessions_db_info[key]["monitor_port"] = state_db_info.get("monitor_port", "") if state_db_info else "" def get_sessions_db_info(self): return self.sessions_db_info @@ -309,7 +358,17 @@ def validate_actions(self, table_name, action_props): raise AclLoaderException("Table {} does not exist".format(table_name)) stage = self.tables_db_info[table_name].get("stage", Stage.INGRESS) - capability = self.statedb.get_all(self.statedb.STATE_DB, 
"{}|switch".format(self.SWITCH_CAPABILITY_TABLE)) + + # check if per npu state db is there then read using first state db + # else read from global statedb + if self.per_npu_statedb: + # For multi-npu we will read using anyone statedb connector for front asic namespace. + # Same information should be there in all state DB's + # as it is static information about switch capability + namespace_statedb = (self.per_npu_statedb.values())[0] + capability = namespace_statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE)) + else: + capability = self.statedb.get_all(self.statedb.STATE_DB, "{}|switch".format(self.SWITCH_CAPABILITY_TABLE)) for action_key in dict(action_props): key = "{}|{}".format(self.ACL_ACTIONS_CAPABILITY_FIELD, stage.upper()) if key not in capability: @@ -518,9 +577,16 @@ def full_update(self): """ for key in self.rules_db_info.keys(): if self.current_table is None or self.current_table == key[0]: - self.configdb.mod_entry(self.ACL_RULE, key, None) + self.configdb.mod_entry(self.ACL_RULE, key, None) + # Program for per front asic namespace also if present + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_entry(self.ACL_RULE, key, None) + self.configdb.mod_config({self.ACL_RULE: self.rules_info}) + # Program for per front asic namespace also if present + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_config({self.ACL_RULE: self.rules_info}) def incremental_update(self): """ @@ -559,10 +625,17 @@ def incremental_update(self): # Remove all existing dataplane rules for key in current_dataplane_rules: self.configdb.mod_entry(self.ACL_RULE, key, None) + # Program for per-asic namespace also if present + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_entry(self.ACL_RULE, key, None) + # Add all new dataplane rules for key in new_dataplane_rules: self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key]) + # Program for 
per-asic namespace corresponding to front asic also if present. + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key]) added_controlplane_rules = new_controlplane_rules.difference(current_controlplane_rules) removed_controlplane_rules = current_controlplane_rules.difference(new_controlplane_rules) @@ -570,14 +643,25 @@ def incremental_update(self): for key in added_controlplane_rules: self.configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key]) + # Program for per-asic namespace corresponding to front asic also if present. + # For control plane ACL it's not needed but to keep all db in sync program everywhere + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_entry(self.ACL_RULE, key, self.rules_info[key]) for key in removed_controlplane_rules: self.configdb.mod_entry(self.ACL_RULE, key, None) + # Program for per-asic namespace corresponding to front asic also if present. + # For control plane ACL it's not needed but to keep all db in sync program everywhere + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.mod_entry(self.ACL_RULE, key, None) for key in existing_controlplane_rules: if cmp(self.rules_info[key], self.rules_db_info[key]) != 0: self.configdb.set_entry(self.ACL_RULE, key, self.rules_info[key]) - + # Program for per-asic namespace corresponding to front asic also if present. + # For control plane ACL it's not needed but to keep all db in sync program everywhere + for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.set_entry(self.ACL_RULE, key, self.rules_info[key]) def delete(self, table=None, rule=None): """ @@ -589,8 +673,10 @@ def delete(self, table=None, rule=None): if not table or table == key[0]: if not rule or rule == key[1]: self.configdb.set_entry(self.ACL_RULE, key, None) - - + # Program for per-asic namespace corresponding to front asic also if present. 
+ for namespace_configdb in self.per_npu_configdb.values(): + namespace_configdb.set_entry(self.ACL_RULE, key, None) + def show_table(self, table_name): """ Show ACL table configuration. @@ -626,7 +712,6 @@ def show_table(self, table_name): print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) - def show_session(self, session_name): """ Show mirror session configuration. @@ -639,7 +724,8 @@ def show_session(self, session_name): for key, val in self.get_sessions_db_info().iteritems(): if session_name and key != session_name: continue - + # For multi-mpu platform status and monitor port will be dict() + # of 'asic-x':value data.append([key, val["status"], val["src_ip"], val["dst_ip"], val.get("gre_type", ""), val.get("dscp", ""), val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), diff --git a/config/main.py b/config/main.py index 063f94e518..8e110cd21d 100755 --- a/config/main.py +++ b/config/main.py @@ -117,7 +117,7 @@ def get_command(self, ctx, cmd_name): # Execute action on list of systemd services def execute_systemctl(list_of_services, action): - num_asic = _get_num_asic() + num_asic = sonic_device_util.get_num_npus() generated_services_list, generated_multi_instance_services = _get_sonic_generated_services(num_asic) if ((generated_services_list == []) and (generated_multi_instance_services == [])): @@ -156,44 +156,12 @@ def run_command(command, display_cmd=False, ignore_error=False): if proc.returncode != 0 and not ignore_error: sys.exit(proc.returncode) -# API to check if this is a multi-asic device or not. -def is_multi_asic(): - num_asics = _get_num_asic() - - if num_asics > 1: - return True - else: - return False - -"""In case of Multi-Asic platform, Each ASIC will have a linux network namespace created. - So we loop through the databases in different namespaces and depending on the sub_role - decide whether this is a front end ASIC/namespace or a back end one. 
-""" -def get_all_namespaces(): - front_ns = [] - back_ns = [] - num_asics = _get_num_asic() - - if is_multi_asic(): - for asic in range(num_asics): - namespace = "{}{}".format(NAMESPACE_PREFIX, asic) - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) - config_db.connect() - - metadata = config_db.get_table('DEVICE_METADATA') - if metadata['localhost']['sub_role'] == 'FrontEnd': - front_ns.append(namespace) - elif metadata['localhost']['sub_role'] == 'BackEnd': - back_ns.append(namespace) - - return {'front_ns': front_ns, 'back_ns': back_ns} - # Validate whether a given namespace name is valid in the device. def validate_namespace(namespace): - if not is_multi_asic(): + if not sonic_device_util.is_multi_npu(): return True - namespaces = get_all_namespaces() + namespaces = sonic_device_util.get_all_namespaces() if namespace in namespaces['front_ns'] + namespaces['back_ns']: return True else: @@ -494,32 +462,6 @@ def _clear_qos(): for qos_table in QOS_TABLE_NAMES: config_db.delete_table(qos_table) -def _get_hwsku(): - config_db = ConfigDBConnector() - config_db.connect() - metadata = config_db.get_table('DEVICE_METADATA') - return metadata['localhost']['hwsku'] - -def _get_platform(): - with open('/host/machine.conf') as machine_conf: - for line in machine_conf: - tokens = line.split('=') - if tokens[0].strip() == 'onie_platform' or tokens[0].strip() == 'aboot_platform': - return tokens[1].strip() - return '' - -def _get_num_asic(): - platform = _get_platform() - num_asic = 1 - asic_conf_file = os.path.join('/usr/share/sonic/device/', platform, ASIC_CONF_FILENAME) - if os.path.isfile(asic_conf_file): - with open(asic_conf_file) as conf_file: - for line in conf_file: - line_info = line.split('=') - if line_info[0].lower() == "num_asic": - num_asic = int(line_info[1]) - return num_asic - def _get_sonic_generated_services(num_asic): if not os.path.isfile(SONIC_GENERATED_SERVICE_PATH): return None @@ -644,11 +586,11 @@ def save(filename): 
"""Export current config DB to a file on disk.\n : Names of configuration file(s) to save, separated by comma with no spaces in between """ - num_asic = _get_num_asic() + num_asic = sonic_device_util.get_num_npus() cfg_files = [] num_cfg_file = 1 - if is_multi_asic(): + if sonic_device_util.is_multi_npu(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. @@ -701,11 +643,11 @@ def load(filename, yes): if not yes: click.confirm(message, abort=True) - num_asic = _get_num_asic() + num_asic = sonic_device_util.get_num_npus() cfg_files = [] num_cfg_file = 1 - if is_multi_asic(): + if sonic_device_util.is_multi_npu(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. @@ -767,11 +709,11 @@ def reload(filename, yes, load_sysinfo): log_info("'reload' executing...") - num_asic = _get_num_asic() + num_asic = sonic_device_util.get_num_npus() cfg_files = [] num_cfg_file = 1 - if is_multi_asic(): + if sonic_device_util.is_multi_npu(): num_cfg_file += num_asic # If the user give the filename[s], extract the file names. 
@@ -1065,9 +1007,6 @@ def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): """ Add mirror session """ - config_db = ConfigDBConnector() - config_db.connect() - session_info = { "src_ip": src_ip, "dst_ip": dst_ip, @@ -1083,8 +1022,21 @@ def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): if queue is not None: session_info['queue'] = queue - - config_db.set_entry("MIRROR_SESSION", session_name, session_info) + + """ + For multi-npu platforms we need to program all front asic namespaces + """ + namespaces = sonic_device_util.get_all_namespaces() + if not namespaces['front_ns']: + config_db = ConfigDBConnector() + config_db.connect() + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + else: + per_npu_configdb = {} + for front_asic_namespaces in namespaces['front_ns']: + per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces].connect() + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) @mirror_session.command() @click.argument('session_name', metavar='', required=True) @@ -1092,10 +1044,21 @@ def remove(session_name): """ Delete mirror session """ - config_db = ConfigDBConnector() - config_db.connect() - config_db.set_entry("MIRROR_SESSION", session_name, None) + """ + For multi-npu platforms we need to program all front asic namespaces + """ + namespaces = sonic_device_util.get_all_namespaces() + if not namespaces['front_ns']: + config_db = ConfigDBConnector() + config_db.connect() + config_db.set_entry("MIRROR_SESSION", session_name, None) + else: + per_npu_configdb = {} + for front_asic_namespaces in namespaces['front_ns']: + per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces].connect() + 
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) # # 'pfcwd' group ('config pfcwd ...') # @@ -1202,8 +1165,8 @@ def reload(): """Reload QoS configuration""" log_info("'qos reload' executing...") _clear_qos() - platform = _get_platform() - hwsku = _get_hwsku() + platform = sonic_device_util.get_platform() + hwsku = sonic_device_util.get_hwsku() buffer_template_file = os.path.join('/usr/share/sonic/device/', platform, hwsku, 'buffers.json.j2') if os.path.isfile(buffer_template_file): command = "{} -d -t {} >/tmp/buffers.json".format(SONIC_CFGGEN_PATH, buffer_template_file) From 03cd9ef1904303701da66ccaf982928db9d7e9c0 Mon Sep 17 00:00:00 2001 From: bsun-sudo <56011247+bsun-sudo@users.noreply.github.com> Date: Thu, 14 May 2020 14:01:28 -0700 Subject: [PATCH 071/111] Fix is_mgmt_vrf_enabled when MGMT_VRF_CONFIG is not present the config DB (#885) Fix is_mgmt_vrf_enabled for the case where MGMT_VRF_CONFIG is not at all in the config DB. This is the case where mgmt vrf is never configured. The function throws error at File "/usr/lib/python2.7/dist-packages/show/main.py", line 651, in is_mgmt_vrf_enabled mvrf_dict = json.loads(p.stdout.read()) Two show commands uses is_mgmt_vrf_enabled. "show mgmt-vrf" and "show ntp" Both commands throw error if mgmt vrf is never configured Co-authored-by: Bing Sun --- show/main.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/show/main.py b/show/main.py index 363895fce1..de57cf3229 100755 --- a/show/main.py +++ b/show/main.py @@ -636,16 +636,19 @@ def is_mgmt_vrf_enabled(ctx): cmd = 'sonic-cfggen -d --var-json "MGMT_VRF_CONFIG"' p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - stdout = p.communicate()[0] - if p.returncode == 0: - mvrf_dict = json.loads(stdout) - - # if the mgmtVrfEnabled attribute is configured, check the value - # and return True accordingly. 
- if 'mgmtVrfEnabled' in mvrf_dict['vrf_global']: - if (mvrf_dict['vrf_global']['mgmtVrfEnabled'] == "true"): - #ManagementVRF is enabled. Return True. - return True + try : + mvrf_dict = json.loads(p.stdout.read()) + except ValueError: + print("MGMT_VRF_CONFIG is not present.") + return False + + # if the mgmtVrfEnabled attribute is configured, check the value + # and return True accordingly. + if 'mgmtVrfEnabled' in mvrf_dict['vrf_global']: + if (mvrf_dict['vrf_global']['mgmtVrfEnabled'] == "true"): + #ManagementVRF is enabled. Return True. + return True + return False # From 8698b3c525cbd423ac0aaf0d0f07913cdbd8d493 Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Wed, 20 May 2020 07:42:23 -0700 Subject: [PATCH 072/111] Changes to commands config reload/load-minigraph (#919) * Changes to ignore services stop/start for config reload and load_minigraph command. This will be needed when executed from config-setup script. * Updated Command Reference document * Address Review Comments. --- config/main.py | 32 +++++++++++++++++++------------- doc/Command-Reference.md | 12 ++++++++++-- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/config/main.py b/config/main.py index 8e110cd21d..93b0e4d90b 100755 --- a/config/main.py +++ b/config/main.py @@ -694,8 +694,9 @@ def load(filename, yes): @config.command() @click.option('-y', '--yes', is_flag=True) @click.option('-l', '--load-sysinfo', is_flag=True, help='load system default information (mac, portmap etc) first.') +@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') @click.argument('filename', required=False) -def reload(filename, yes, load_sysinfo): +def reload(filename, yes, load_sysinfo, no_service_restart): """Clear current configuration and import a previous saved config DB dump file. 
: Names of configuration file(s) to load, separated by comma with no spaces in between """ @@ -735,8 +736,9 @@ def reload(filename, yes, load_sysinfo): cfg_hwsku = cfg_hwsku.strip() #Stop services before config push - log_info("'reload' stopping services...") - _stop_services() + if not no_service_restart: + log_info("'reload' stopping services...") + _stop_services() """ In Single AISC platforms we have single DB service. In multi-ASIC platforms we have a global DB service running in the host + DB services running in each ASIC namespace created per ASIC. @@ -808,9 +810,10 @@ def reload(filename, yes, load_sysinfo): # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them - _reset_failed_services() - log_info("'reload' restarting services...") - _restart_services() + if not no_service_restart: + _reset_failed_services() + log_info("'reload' restarting services...") + _restart_services() @config.command("load_mgmt_config") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, @@ -840,7 +843,8 @@ def load_mgmt_config(filename): @config.command("load_minigraph") @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Reload config from minigraph?') -def load_minigraph(): +@click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services') +def load_minigraph(no_service_restart): """Reconfigure based on minigraph.""" log_info("'load_minigraph' executing...") @@ -855,8 +859,9 @@ def load_minigraph(): device_type = device_type.strip() #Stop services before config push - log_info("'load_minigraph' stopping services...") - _stop_services() + if not no_service_restart: + log_info("'load_minigraph' stopping services...") + _stop_services() # For Single Asic platform the namespace list has the empty string # for mulit Asic platform the empty string to generate the config @@ -901,10 +906,11 @@ def 
load_minigraph(): # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them - _reset_failed_services() - #FIXME: After config DB daemon is implemented, we'll no longer need to restart every service. - log_info("'load_minigraph' restarting services...") - _restart_services() + if not no_service_restart: + _reset_failed_services() + #FIXME: After config DB daemon is implemented, we'll no longer need to restart every service. + log_info("'load_minigraph' restarting services...") + _restart_services() click.echo("Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`.") diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 384c1f357e..2e2a57306f 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3269,9 +3269,13 @@ NOTE: Management interface IP address and default route (or specific route) may When user specifies the optional argument "-y" or "--yes", this command forces the loading without prompting the user for confirmation. If the argument is not specified, it prompts the user to confirm whether user really wants to load this configuration file. +When user specifies the optional argument "-n" or "--no-service-restart", this command loads the configuration without restarting dependent services +running on the device. One use case for this option is during boot time when config-setup service loads minigraph configuration and there is no services +running on the device. + - Usage: ``` - config load_minigraph [-y|--yes] + config load_minigraph [-y|--yes] [-n|--no-service-restart] ``` - Example: @@ -3304,9 +3308,13 @@ NOTE: Management interface IP address and default route (or specific route) may When user specifies the optional argument "-y" or "--yes", this command forces the loading without prompting the user for confirmation. 
If the argument is not specified, it prompts the user to confirm whether user really wants to load this configuration file. +When user specifies the optional argument "-n" or "--no-service-restart", this command clear and loads the configuration without restarting dependent services +running on the device. One use case for this option is during boot time when config-setup service loads existing old configuration and there is no services +running on the device. + - Usage: ``` - config reload [-y|--yes] [-l|--load-sysinfo] [] + config reload [-y|--yes] [-l|--load-sysinfo] [] [-n|--no-service-restart] ``` - Example: From b3b444a330371d3d6cf42dd83241a2083efe0b73 Mon Sep 17 00:00:00 2001 From: Preetham <51771885+preetham-singh@users.noreply.github.com> Date: Thu, 21 May 2020 06:14:20 +0530 Subject: [PATCH 073/111] Loopback interface configuration CLIs (#879) * Adding configuration CLIs for loopback interfaces --- config/main.py | 70 +++++++++++++ doc/Command-Reference.md | 208 ++++++++++++++++++++++++++++++++++----- 2 files changed, 251 insertions(+), 27 deletions(-) diff --git a/config/main.py b/config/main.py index 93b0e4d90b..a02b0c2e77 100755 --- a/config/main.py +++ b/config/main.py @@ -36,6 +36,11 @@ SYSTEMCTL_ACTION_RESET_FAILED="reset-failed" DEFAULT_NAMESPACE = '' +CFG_LOOPBACK_PREFIX = "Loopback" +CFG_LOOPBACK_PREFIX_LEN = len(CFG_LOOPBACK_PREFIX) +CFG_LOOPBACK_NAME_TOTAL_LEN_MAX = 11 +CFG_LOOPBACK_ID_MAX_VAL = 999 +CFG_LOOPBACK_NO="<0-999>" # ========================== Syslog wrappers ========================== def log_debug(msg): @@ -2613,6 +2618,71 @@ def naming_mode_alias(): """Set CLI interface naming mode to ALIAS (Vendor port alias)""" set_interface_naming_mode('alias') +@config.group() +def is_loopback_name_valid(loopback_name): + """Loopback name validation + """ + + if loopback_name[:CFG_LOOPBACK_PREFIX_LEN] != CFG_LOOPBACK_PREFIX : + return False + if (loopback_name[CFG_LOOPBACK_PREFIX_LEN:].isdigit() is False or + 
int(loopback_name[CFG_LOOPBACK_PREFIX_LEN:]) > CFG_LOOPBACK_ID_MAX_VAL) : + return False + if len(loopback_name) > CFG_LOOPBACK_NAME_TOTAL_LEN_MAX: + return False + return True + +# +# 'loopback' group ('config loopback ...') +# +@config.group() +@click.pass_context +@click.option('-s', '--redis-unix-socket-path', help='unix socket path for redis connection') +def loopback(ctx, redis_unix_socket_path): + """Loopback-related configuration tasks""" + kwargs = {} + if redis_unix_socket_path: + kwargs['unix_socket_path'] = redis_unix_socket_path + config_db = ConfigDBConnector(**kwargs) + config_db.connect(wait_for_init=False) + ctx.obj = {'db': config_db} + +@loopback.command('add') +@click.argument('loopback_name', metavar='', required=True) +@click.pass_context +def add_loopback(ctx, loopback_name): + config_db = ctx.obj['db'] + if is_loopback_name_valid(loopback_name) is False: + ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' " + .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO)) + + lo_intfs = [k for k,v in config_db.get_table('LOOPBACK_INTERFACE').iteritems() if type(k) != tuple] + if loopback_name in lo_intfs: + ctx.fail("{} already exists".format(loopback_name)) + + config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, {"NULL" : "NULL"}) + +@loopback.command('del') +@click.argument('loopback_name', metavar='', required=True) +@click.pass_context +def del_loopback(ctx, loopback_name): + config_db = ctx.obj['db'] + if is_loopback_name_valid(loopback_name) is False: + ctx.fail("{} is invalid, name should have prefix '{}' and suffix '{}' " + .format(loopback_name, CFG_LOOPBACK_PREFIX, CFG_LOOPBACK_NO)) + + lo_config_db = config_db.get_table('LOOPBACK_INTERFACE') + lo_intfs = [k for k,v in lo_config_db.iteritems() if type(k) != tuple] + if loopback_name not in lo_intfs: + ctx.fail("{} does not exists".format(loopback_name)) + + ips = [ k[1] for k in lo_config_db if type(k) == tuple and k[0] == loopback_name ] + for ip in ips: + 
config_db.set_entry('LOOPBACK_INTERFACE', (loopback_name, ip), None) + + config_db.set_entry('LOOPBACK_INTERFACE', loopback_name, None) + + @config.group(cls=AbbreviationGroup) def ztp(): """ Configure Zero Touch Provisioning """ diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 2e2a57306f..a6598b717e 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -49,6 +49,9 @@ * [Interface Naming Mode](#interface-naming-mode) * [Interface naming mode show commands](#interface-naming-mode-show-commands) * [Interface naming mode config commands](#interface-naming-mode-config-commands) + * [Interface Vrf binding](#interface-vrf-binding) + * [Interface vrf bind & unbind config commands](#interface-vrf-bind-&-unbind-config-commands) + * [Interface vrf binding show commands](#interface-vrf-binding-show-commands) * [IP / IPv6](#ip--ipv6) * [IP show commands](#ip-show-commands) * [IPv6 show commands](#ipv6-show-commands) @@ -60,6 +63,11 @@ * [Reloading Configuration](#reloading-configuration) * [Loading Management Configuration](#loading-management-configuration) * [Saving Configuration to a File for Persistence](saving-configuration-to-a-file-for-persistence) + * [Loopback Interfaces](#loopback-interfaces) + * [Loopback config commands](#loopback-config-commands) +* [VRF Configuration](#vrf-configuration) + * [VRF show commands](#vrf-show-commands) + * [VRF config commands](#vrf-config-commands) * [Management VRF](#Management-VRF) * [Management VRF Show commands](#management-vrf-show-commands) * [Management VRF Config commands](#management-vrf-config-commands) @@ -271,15 +279,18 @@ This command lists all the possible configuration commands at the top level. load Import a previous saved config DB dump file. load_mgmt_config Reconfigure hostname and mgmt interface based... load_minigraph Reconfigure based on minigraph. + loopback Loopback-related configuration tasks. 
mirror_session nat NAT-related configuration tasks platform Platform-related configuration tasks portchannel qos reload Clear current configuration and import a... + route route-related configuration tasks save Export current config DB to a file on disk. tacacs TACACS+ server configuration vlan VLAN-related configuration tasks + vrf VRF-related configuration tasks warm_restart warm_restart-related configuration tasks watermark Configure watermark container Modify configuration of containers @@ -342,6 +353,7 @@ This command displays the full list of show commands available in the software; users Show users version Show version information vlan Show VLAN information + vrf Show vrf config warm_restart Show warm restart configuration and state watermark Show details of watermark container Show details of container @@ -2440,7 +2452,6 @@ This command displays the key fields of the interfaces such as Operational Statu Ethernet4 down up hundredGigE1/2 T0-2:hundredGigE1/30 ``` - **show interfaces naming_mode** Refer sub-section [Interface-Naming-Mode](#Interface-Naming-Mode) @@ -2544,7 +2555,7 @@ NOTE: In older versions of SONiC until 201811 release, the command syntax was `c **config interface ip add (Versions <= 201811)** This command is used for adding the IP address for an interface. -IP address for either physical interface or for portchannel or for VLAN interface can be configured using this command. +IP address for either physical interface or for portchannel or for VLAN interface or for Loopback interface can be configured using this command. While configuring the IP address for the management interface "eth0", users can provide the default gateway IP address as an optional parameter from release 201911. @@ -2900,6 +2911,35 @@ The user must log out and log back in for changes to take effect. 
Note that the Go Back To [Beginning of the document](#) or [Beginning of this section](#interface-naming-mode) +## Interface Vrf binding + +### Interface vrf bind & unbind config commands + +**config interface vrf bind** + +This command is used to bind a interface to a vrf. +By default, all L3 interfaces will be in default vrf. Above vrf bind command will let users bind interface to a vrf. + +- Usage: + ``` + config interface vrf bind + ``` + +**config interface vrf unbind** + +This command is used to ubind a interface from a vrf. +This will move the interface to default vrf. + +- Usage: + ``` + config interface vrf unbind + ``` + + ### Interface vrf binding show commands + + To display interface vrf binding information, user can use show vrf command. Please refer sub-section [Vrf-show-command](#vrf-show-commands). + +Go Back To [Beginning of the document](#) or [Beginning of this section](#interface-vrf-binding) ## IP / IPv6 @@ -2918,7 +2958,7 @@ This command displays either all the route entries from the routing table or a s - Usage: ``` - show ip route [] + show ip route [] [] ``` - Example: @@ -2929,12 +2969,9 @@ This command displays either all the route entries from the routing table or a s > - selected route, * - FIB route S>* 0.0.0.0/0 [200/0] via 10.11.162.254, eth0 C>* 1.1.0.0/16 is directly connected, Vlan100 - C>* 10.1.0.1/32 is directly connected, lo - C>* 10.1.0.32/32 is directly connected, lo C>* 10.1.1.0/31 is directly connected, Ethernet112 C>* 10.1.1.2/31 is directly connected, Ethernet116 C>* 10.11.162.0/24 is directly connected, eth0 - C>* 10.12.0.102/32 is directly connected, lo C>* 127.0.0.0/8 is directly connected, lo C>* 240.127.1.0/24 is directly connected, docker0 ``` @@ -2949,6 +2986,27 @@ This command displays either all the route entries from the routing table or a s * directly connected, Ethernet112 ``` + - Vrf-name can also be specified to get IPv4 routes programmed in the vrf. 
+ + - Example: + ``` + admin@sonic:~$ show ip route vrf Vrf-red + Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route + VRF Vrf-red: + C>* 11.1.1.1/32 is directly connected, Loopback11, 21:50:47 + C>* 100.1.1.0/24 is directly connected, Vlan100, 03w1d06h + + admin@sonic:~$ show ip route vrf Vrf-red 11.1.1.1/32 + Routing entry for 11.1.1.1/32 + Known via "connected", distance 0, metric 0, vrf Vrf-red, best + Last update 21:57:53 ago + * directly connected, Loopback11 + ``` + **show ip interfaces** This command displays the details about all the Layer3 IP interfaces in the device for which IP address has been assigned. @@ -2968,16 +3026,20 @@ The type of interfaces include the following. - Example: ``` admin@sonic:~$ show ip interfaces - Interface IPv4 address/mask Admin/Oper BGP Neighbor Neighbor IP - ------------- ------------------- ------------ -------------- ------------- - PortChannel01 10.0.0.56/31 up/down DEVICE1 10.0.0.57 - PortChannel02 10.0.0.58/31 up/down DEVICE2 10.0.0.59 - PortChannel03 10.0.0.60/31 up/down DEVICE3 10.0.0.61 - PortChannel04 10.0.0.62/31 up/down DEVICE4 10.0.0.63 - Vlan1000 192.168.0.1/27 up/up N/A N/A - docker0 240.127.1.1/24 up/down N/A N/A - eth0 10.3.147.252/23 up/up N/A N/A - lo 127.0.0.1/8 up/up N/A N/A + Interface Master IPv4 address/mask Admin/Oper BGP Neighbor Neighbor IP Flags + ------------- ------------ ------------------ -------------- ------------- ------------- ------- + Loopback0 1.0.0.1/32 up/up N/A N/A + Loopback11 Vrf-red 11.1.1.1/32 up/up N/A N/A + Loopback100 Vrf-blue 100.0.0.1/32 up/up N/A N/A + PortChannel01 10.0.0.56/31 up/down DEVICE1 10.0.0.57 + PortChannel02 10.0.0.58/31 up/down DEVICE2 10.0.0.59 + PortChannel03 10.0.0.60/31 up/down DEVICE3 10.0.0.61 + PortChannel04 10.0.0.62/31 up/down DEVICE4 10.0.0.63 + Vlan100 Vrf-red 
1001.1.1/24 up/up N/A N/A + Vlan1000 192.168.0.1/27 up/up N/A N/A + docker0 240.127.1.1/24 up/down N/A N/A + eth0 10.3.147.252/23 up/up N/A N/A + lo 127.0.0.1/8 up/up N/A N/A ``` **show ip protocol** @@ -3026,7 +3088,7 @@ This command displays either all the IPv6 route entries from the routing table o - Usage: ``` - show ipv6 route [] + show ipv6 route [] [] ``` - Example: @@ -3060,6 +3122,29 @@ This command displays either all the IPv6 route entries from the routing table o * directly connected, lo ``` + Vrf-name can also be specified to get IPv6 routes programmed in the vrf. + + - Example: + ``` + admin@sonic:~$ show ipv6 route vrf Vrf-red + Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route + VRF Vrf-red: + C>* 1100::1/128 is directly connected, Loopback11, 21:50:47 + C>* 100::/112 is directly connected, Vlan100, 03w1d06h + C>* fe80::/64 is directly connected, Loopback11, 21:50:47 + C>* fe80::/64 is directly connected, Vlan100, 03w1d06h + + admin@sonic:~$ show ipv6 route vrf Vrf-red 1100::1/128 + Routing entry for 1100::1/128 + Known via "connected", distance 0, metric 0, vrf Vrf-red, best + Last update 21:57:53 ago + * directly connected, Loopback11 + ``` + **show ipv6 interfaces** This command displays the details about all the Layer3 IPv6 interfaces in the device for which IPv6 address has been assigned. @@ -3078,16 +3163,18 @@ The type of interfaces include the following. 
- Example: ``` admin@sonic:~$ show ipv6 interfaces - Interface IPv6 address/mask Admin/Oper BGP Neighbor Neighbor IP - ------------- ---------------------------------------- ------------ -------------- ------------- - Bridge fe80::7c45:1dff:fe08:cdd%Bridge/64 up/up N/A N/A - PortChannel01 fc00::71/126 up/down DEVICE1 fc00::72 - PortChannel02 fc00::75/126 up/down DEVICE2 fc00::76 - PortChannel03 fc00::79/126 up/down DEVICE3 fc00::7a - PortChannel04 fc00::7d/126 up/down DEVICE4 fc00::7e - Vlan100 fe80::eef4:bbff:fefe:880a%Vlan100/64 up/up N/A N/A - eth0 fe80::eef4:bbff:fefe:880a%eth0/64 up/up N/A N/A - lo fc00:1::32/128 up/up N/A N/A + Interface Master IPv6 address/mask Admin/Oper BGP Neighbor Neighbor IP + ----------- -------- ---------------------------------------- ------------ -------------- ------------- + Bridge fe80::7c45:1dff:fe08:cdd%Bridge/64 up/up N/A N/A + Loopback11 Vrf-red 1100::1/128 up/up + PortChannel01 fc00::71/126 up/down DEVICE1 fc00::72 + PortChannel02 fc00::75/126 up/down DEVICE2 fc00::76 + PortChannel03 fc00::79/126 up/down DEVICE3 fc00::7a + PortChannel04 fc00::7d/126 up/down DEVICE4 fc00::7e + Vlan100 Vrf-red 100::1/112 up/up N/A N/A + fe80::eef4:bbff:fefe:880a%Vlan100/64 + eth0 fe80::eef4:bbff:fefe:880a%eth0/64 up/up N/A N/A + lo fc00:1::32/128 up/up N/A N/A ``` **show ipv6 protocol** @@ -3386,6 +3473,73 @@ Saved file can be transferred to remote machines for debugging. If users wants t Go Back To [Beginning of the document](#) or [Beginning of this section](#loading-reloading-and-saving-configuration) +## Loopback Interfaces + +### Loopback Config commands + +This sub-section explains how to create and delete loopback interfaces. + +**config interface loopback** + +This command is used to add or delete loopback interfaces. +It is recommended to use loopback names in the format "Loopbackxxx", where "xxx" is number of 1 to 3 digits. Ex: "Loopback11". 
+ +- Usage: + ``` + config loopback (add | del) + ``` + +- Example (Create the loopback with name "Loopback11"): + ``` + admin@sonic:~$ sudo config loopback add Loopback11 + ``` + +## VRF Configuration + +### VRF show commands + +**show vrf** + +This command displays all vrfs configured on the system along with interface binding to the vrf. +If vrf-name is also provided as part of the command, if the vrf is created it will display all interfaces binding to the vrf, if vrf is not created nothing will be displayed. + +- Usage: + ``` + show vrf [] + ``` + +- Example: + ```` + admin@sonic:~$ show vrf + VRF Interfaces + ------- ------------ + default Vlan20 + Vrf-red Vlan100 + Loopback11 + Vrf-blue Loopback100 + Loopback102 + ```` + +### VRF config commands + +**config vrf add ** + +This command creates vrf in SONiC system with provided vrf-name. + +- Usage: + ``` +config vrf add +``` +Note: vrf-name should always start with keyword "Vrf" + +**config vrf del ** + +This command deletes vrf with name vrf-name. + +- Usage: + ``` +config vrf del +``` ## Management VRF From 89b87c014c1b80d85ab838ad63f175e2571a33ab Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Wed, 20 May 2020 22:13:43 -0700 Subject: [PATCH 074/111] Multi-asic changes for config bgp commands and utilities. (#910) * Multi-asic changes for config bgp commands and utilities. * Review comments update * Optimized the logic of checking the internal hosts using AS number. * API changes due to rebase. Also setting ignore_local_hosts=false explicitly in BGP commands so that existing single ASIC platform implementation is unchanged. 
--- config/main.py | 175 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 133 insertions(+), 42 deletions(-) diff --git a/config/main.py b/config/main.py index a02b0c2e77..30fc4cd5c4 100755 --- a/config/main.py +++ b/config/main.py @@ -356,88 +356,95 @@ def get_interface_naming_mode(): mode = "default" return mode -def _is_neighbor_ipaddress(ipaddress): +# Get the local BGP ASN from DEVICE_METADATA +def get_local_bgp_asn(config_db): + metadata = config_db.get_table('DEVICE_METADATA') + return metadata['localhost']['bgp_asn'] + +def _is_neighbor_ipaddress(config_db, ipaddress): """Returns True if a neighbor has the IP address , False if not """ - config_db = ConfigDBConnector() - config_db.connect() entry = config_db.get_entry('BGP_NEIGHBOR', ipaddress) return True if entry else False -def _get_all_neighbor_ipaddresses(): +def _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts=False): """Returns list of strings containing IP addresses of all BGP neighbors + if the flag ignore_local_hosts is set to True, additional check to see if + if the BGP neighbor AS number is same as local BGP AS number, if so ignore that neigbor. """ - config_db = ConfigDBConnector() - config_db.connect() - return config_db.get_table('BGP_NEIGHBOR').keys() + addrs = [] + bgp_sessions = config_db.get_table('BGP_NEIGHBOR') + local_as = get_local_bgp_asn(config_db) + for addr, session in bgp_sessions.iteritems(): + if not ignore_local_hosts or (ignore_local_hosts and local_as != session['asn']): + addrs.append(addr) + return addrs -def _get_neighbor_ipaddress_list_by_hostname(hostname): +def _get_neighbor_ipaddress_list_by_hostname(config_db, hostname): """Returns list of strings, each containing an IP address of neighbor with hostname . 
Returns empty list if not a neighbor """ addrs = [] - config_db = ConfigDBConnector() - config_db.connect() bgp_sessions = config_db.get_table('BGP_NEIGHBOR') for addr, session in bgp_sessions.iteritems(): if session.has_key('name') and session['name'] == hostname: addrs.append(addr) return addrs -def _change_bgp_session_status_by_addr(ipaddress, status, verbose): +def _change_bgp_session_status_by_addr(config_db, ipaddress, status, verbose): """Start up or shut down BGP session by IP address """ verb = 'Starting' if status == 'up' else 'Shutting' click.echo("{} {} BGP session with neighbor {}...".format(verb, status, ipaddress)) - config_db = ConfigDBConnector() - config_db.connect() config_db.mod_entry('bgp_neighbor', ipaddress, {'admin_status': status}) -def _change_bgp_session_status(ipaddr_or_hostname, status, verbose): +def _change_bgp_session_status(config_db, ipaddr_or_hostname, status, verbose): """Start up or shut down BGP session by IP address or hostname """ ip_addrs = [] # If we were passed an IP address, convert it to lowercase because IPv6 addresses were # stored in ConfigDB with all lowercase alphabet characters during minigraph parsing - if _is_neighbor_ipaddress(ipaddr_or_hostname.lower()): + if _is_neighbor_ipaddress(config_db, ipaddr_or_hostname.lower()): ip_addrs.append(ipaddr_or_hostname.lower()) else: # If is not the IP address of a neighbor, check to see if it's a hostname - ip_addrs = _get_neighbor_ipaddress_list_by_hostname(ipaddr_or_hostname) + ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, ipaddr_or_hostname) if not ip_addrs: - click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname)) + return False for ip_addr in ip_addrs: - _change_bgp_session_status_by_addr(ip_addr, status, verbose) + _change_bgp_session_status_by_addr(config_db, ip_addr, status, verbose) + + return True -def _validate_bgp_neighbor(neighbor_ip_or_hostname): +def _validate_bgp_neighbor(config_db, 
neighbor_ip_or_hostname): """validates whether the given ip or host name is a BGP neighbor """ ip_addrs = [] - if _is_neighbor_ipaddress(neighbor_ip_or_hostname.lower()): + if _is_neighbor_ipaddress(config_db, neighbor_ip_or_hostname.lower()): ip_addrs.append(neighbor_ip_or_hostname.lower()) else: - ip_addrs = _get_neighbor_ipaddress_list_by_hostname(neighbor_ip_or_hostname.upper()) - - if not ip_addrs: - click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname)) + ip_addrs = _get_neighbor_ipaddress_list_by_hostname(config_db, neighbor_ip_or_hostname.upper()) return ip_addrs -def _remove_bgp_neighbor_config(neighbor_ip_or_hostname): +def _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname): """Removes BGP configuration of the given neighbor """ - ip_addrs = _validate_bgp_neighbor(neighbor_ip_or_hostname) - config_db = ConfigDBConnector() - config_db.connect() + ip_addrs = _validate_bgp_neighbor(config_db, neighbor_ip_or_hostname) + + if not ip_addrs: + return False for ip_addr in ip_addrs: config_db.mod_entry('bgp_neighbor', ip_addr, None) click.echo("Removed configuration of BGP neighbor {}".format(ip_addr)) + return True + def _change_hostname(hostname): current_hostname = os.uname()[1] if current_hostname != hostname: @@ -1664,20 +1671,53 @@ def num_dumps(kdump_num_dumps): @shutdown.command() @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): - """Shut down all BGP sessions""" + """Shut down all BGP sessions + In the case of Multi-Asic platform, we shut only the EBGP sessions with external neighbors. 
+ """ log_info("'bgp shutdown all' executing...") - bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() - for ipaddress in bgp_neighbor_ip_list: - _change_bgp_session_status_by_addr(ipaddress, 'down', verbose) + namespaces = [DEFAULT_NAMESPACE] + ignore_local_hosts = False + + if sonic_device_util.is_multi_npu(): + ns_list = sonic_device_util.get_all_namespaces() + namespaces = ns_list['front_ns'] + ignore_local_hosts = True + + # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the + # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) + for namespace in namespaces: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts) + for ipaddress in bgp_neighbor_ip_list: + _change_bgp_session_status_by_addr(config_db, ipaddress, 'down', verbose) # 'neighbor' subcommand @shutdown.command() @click.argument('ipaddr_or_hostname', metavar='', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): - """Shut down BGP session by neighbor IP address or hostname""" + """Shut down BGP session by neighbor IP address or hostname. 
+ User can specify either internal or external BGP neighbor to shutdown + """ log_info("'bgp shutdown neighbor {}' executing...".format(ipaddr_or_hostname)) - _change_bgp_session_status(ipaddr_or_hostname, 'down', verbose) + namespaces = [DEFAULT_NAMESPACE] + found_neighbor = False + + if sonic_device_util.is_multi_npu(): + ns_list = sonic_device_util.get_all_namespaces() + namespaces = ns_list['front_ns'] + ns_list['back_ns'] + + # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the + # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) + for namespace in namespaces: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'down', verbose): + found_neighbor = True + + if not found_neighbor: + click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname)) @bgp.group(cls=AbbreviationGroup) def startup(): @@ -1688,20 +1728,53 @@ def startup(): @startup.command() @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def all(verbose): - """Start up all BGP sessions""" + """Start up all BGP sessions + In the case of Multi-Asic platform, we startup only the EBGP sessions with external neighbors. 
+ """ log_info("'bgp startup all' executing...") - bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses() - for ipaddress in bgp_neighbor_ip_list: - _change_bgp_session_status(ipaddress, 'up', verbose) + namespaces = [DEFAULT_NAMESPACE] + ignore_local_hosts = False + + if sonic_device_util.is_multi_npu(): + ns_list = sonic_device_util.get_all_namespaces() + namespaces = ns_list['front_ns'] + ignore_local_hosts = True + + # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the + # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) + for namespace in namespaces: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + bgp_neighbor_ip_list = _get_all_neighbor_ipaddresses(config_db, ignore_local_hosts) + for ipaddress in bgp_neighbor_ip_list: + _change_bgp_session_status_by_addr(config_db, ipaddress, 'up', verbose) # 'neighbor' subcommand @startup.command() @click.argument('ipaddr_or_hostname', metavar='', required=True) @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") def neighbor(ipaddr_or_hostname, verbose): - """Start up BGP session by neighbor IP address or hostname""" log_info("'bgp startup neighbor {}' executing...".format(ipaddr_or_hostname)) - _change_bgp_session_status(ipaddr_or_hostname, 'up', verbose) + """Start up BGP session by neighbor IP address or hostname. 
+ User can specify either internal or external BGP neighbor to startup + """ + namespaces = [DEFAULT_NAMESPACE] + found_neighbor = False + + if sonic_device_util.is_multi_npu(): + ns_list = sonic_device_util.get_all_namespaces() + namespaces = ns_list['front_ns'] + ns_list['back_ns'] + + # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the + # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) + for namespace in namespaces: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + if _change_bgp_session_status(config_db, ipaddr_or_hostname, 'up', verbose): + found_neighbor = True + + if not found_neighbor: + click.get_current_context().fail("Could not locate neighbor '{}'".format(ipaddr_or_hostname)) # # 'remove' subgroup ('config bgp remove ...') @@ -1715,8 +1788,26 @@ def remove(): @remove.command('neighbor') @click.argument('neighbor_ip_or_hostname', metavar='', required=True) def remove_neighbor(neighbor_ip_or_hostname): - """Deletes BGP neighbor configuration of given hostname or ip from devices""" - _remove_bgp_neighbor_config(neighbor_ip_or_hostname) + """Deletes BGP neighbor configuration of given hostname or ip from devices + User can specify either internal or external BGP neighbor to remove + """ + namespaces = [DEFAULT_NAMESPACE] + removed_neighbor = False + + if sonic_device_util.is_multi_npu(): + ns_list = sonic_device_util.get_all_namespaces() + namespaces = ns_list['front_ns'] + ns_list['back_ns'] + + # Connect to CONFIG_DB in linux host (in case of single ASIC) or CONFIG_DB in all the + # namespaces (in case of multi ASIC) and do the sepcified "action" on the BGP neighbor(s) + for namespace in namespaces: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_db.connect() + if _remove_bgp_neighbor_config(config_db, neighbor_ip_or_hostname): + removed_neighbor = True + + if not removed_neighbor: + 
click.get_current_context().fail("Could not locate neighbor '{}'".format(neighbor_ip_or_hostname)) # # 'interface' group ('config interface ...') From 5b032f968b623c50154f14781a61690b16b2995f Mon Sep 17 00:00:00 2001 From: carycelestica <39321413+carycelestica@users.noreply.github.com> Date: Tue, 26 May 2020 07:30:00 +0800 Subject: [PATCH 075/111] Add common PCIe diag tool (#771) * Add [show platform pcieinfo] command * Add pcieutil moudle * Add pcietul package * make up for mistake in setup.py * make up the mistake for show/main.py --- pcieutil/__init__.py | 0 pcieutil/main.py | 199 +++++++++++++++++++++++++++++++++++++++++++ setup.py | 2 + show/main.py | 26 ++++-- 4 files changed, 219 insertions(+), 8 deletions(-) create mode 100644 pcieutil/__init__.py create mode 100644 pcieutil/main.py diff --git a/pcieutil/__init__.py b/pcieutil/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pcieutil/main.py b/pcieutil/main.py new file mode 100644 index 0000000000..8fddce7979 --- /dev/null +++ b/pcieutil/main.py @@ -0,0 +1,199 @@ +#!/usr/bin/env python +# +# main.py +# +# Command-line utility for interacting with PCIE in SONiC +# + +try: + import sys + import os + import subprocess + import click + import imp + import syslog + import types + import traceback + from tabulate import tabulate +except ImportError as e: + raise ImportError("%s - required module not found" % str(e)) + +VERSION = '1.0' + +SYSLOG_IDENTIFIER = "pcieutil" +PLATFORM_SPECIFIC_MODULE_NAME = "pcieutil" + +PLATFORM_ROOT_PATH = '/usr/share/sonic/device' +PLATFORM_ROOT_PATH_DOCKER = '/usr/share/sonic/platform' +SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' +HWSKU_KEY = 'DEVICE_METADATA.localhost.hwsku' +PLATFORM_KEY = 'DEVICE_METADATA.localhost.platform' + +#from pcieutil import PcieUtil + +# Global platform-specific psuutil class instance +platform_pcieutil = None +hwsku_path = None + +# ========================== Syslog wrappers ========================== + + +def 
log_info(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_INFO, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + + +def log_warning(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_WARNING, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + + +def log_error(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_ERR, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + +def log_out(name, result): + string = "PCI Device: {} ".format(name) + length = 105-len(string) + sys.stdout.write(string) + for i in xrange(int(length)): + sys.stdout.write("-") + print ' [%s]' % result + +# ==================== Methods for initialization ==================== + +# Returns platform and HW SKU +def get_platform_and_hwsku(): + try: + proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-H', '-v', PLATFORM_KEY], + stdout=subprocess.PIPE, + shell=False, + stderr=subprocess.STDOUT) + stdout = proc.communicate()[0] + proc.wait() + platform = stdout.rstrip('\n') + + proc = subprocess.Popen([SONIC_CFGGEN_PATH, '-d', '-v', HWSKU_KEY], + stdout=subprocess.PIPE, + shell=False, + stderr=subprocess.STDOUT) + stdout = proc.communicate()[0] + proc.wait() + hwsku = stdout.rstrip('\n') + except OSError, e: + raise OSError("Cannot detect platform") + + return (platform, hwsku) + + +# Loads platform specific psuutil module from source +def load_platform_pcieutil(): + global platform_pcieutil + global hwsku_plugins_path + # Get platform and hwsku + (platform, hwsku) = get_platform_and_hwsku() + + # Load platform module from source + try: + hwsku_plugins_path = "/".join([PLATFORM_ROOT_PATH, platform, "plugins"]) + sys.path.append(os.path.abspath(hwsku_plugins_path)) + from pcieutil import PcieUtil + except ImportError as e: + log_warning("Fail to load specific PcieUtil moudle. 
Falling down to the common implementation") + try: + from sonic_platform_base.sonic_pcie.pcie_common import PcieUtil + platform_pcieutil = PcieUtil(hwsku_plugins_path) + except ImportError as e: + log_error("Fail to load default PcieUtil moudle. Error :{}".format(str(e)), True) + raise e + + +# ==================== CLI commands and groups ==================== + + +# This is our main entrypoint - the main 'psuutil' command +@click.group() +def cli(): + """pcieutil - Command line utility for checking pci device""" + if os.geteuid() != 0: + click.echo("Root privileges are required for this operation") + sys.exit(1) + + # Load platform-specific psuutil class + load_platform_pcieutil() + +# 'version' subcommand +@cli.command() +def version(): + """Display version info""" + click.echo("pcieutil version {0}".format(VERSION)) + +#show the platform PCIE info + + +def print_test_title(testname): + click.echo("{name:=^80s}".format(name=testname)) + +# Show PCIE lnkSpeed +@cli.command() +def pcie_show(): + '''Display PCIe Device ''' + testname = "Display PCIe Device" + print_test_title(testname) + resultInfo = platform_pcieutil.get_pcie_device() + for item in resultInfo: + Bus = item["bus"] + Dev = item["dev"] + Fn = item["fn"] + Name = item["name"] + Id = item["id"] + print "bus:dev.fn %s:%s.%s - dev_id=0x%s, %s" % (Bus,Dev,Fn,Id,Name) + + + + + +# Show PCIE Vender ID and Device ID +@cli.command() +def pcie_check(): + '''Check PCIe Device ''' + testname = "PCIe Device Check" + err = 0 + print_test_title(testname) + resultInfo = platform_pcieutil.get_pcie_check() + for item in resultInfo: + if item["result"] == "Passed": + log_out(item["name"], "Passed") + else: + log_out(item["name"], "Failed") + log_warning("PCIe Device: " + item["name"] + " Not Found") + err+=1 + if err: + print "PCIe Device Checking All Test ----------->>> FAILED" + else: + print "PCIe Device Checking All Test ----------->>> PASSED" + + + + +@cli.command() +@click.confirmation_option(prompt="Are you sure 
to overwrite config file pcie.yaml with current pcie device info?") +def pcie_generate(): + '''Generate config file with current pci device''' + platform_pcieutil.dump_conf_yaml() + print "Generate config file pcie.yaml under path %s" %hwsku_plugins_path + +if __name__ == '__main__': + cli() diff --git a/setup.py b/setup.py index a1fece9118..09e893e3f3 100644 --- a/setup.py +++ b/setup.py @@ -42,6 +42,7 @@ 'pfc', 'psuutil', 'fwutil', + 'pcieutil', 'pddf_fanutil', 'pddf_psuutil', 'pddf_thermalutil', @@ -124,6 +125,7 @@ 'pfc = pfc.main:cli', 'psuutil = psuutil.main:cli', 'fwutil = fwutil.main:cli', + 'pcieutil = pcieutil.main:cli', 'pddf_fanutil = pddf_fanutil.main:cli', 'pddf_psuutil = pddf_psuutil.main:cli', 'pddf_thermalutil = pddf_thermalutil.main:cli', diff --git a/show/main.py b/show/main.py index de57cf3229..45a1e1c185 100755 --- a/show/main.py +++ b/show/main.py @@ -1725,6 +1725,23 @@ def ssdhealth(device, verbose, vendor): options += " -e" if vendor else "" run_command(cmd + options, display_cmd=verbose) +@platform.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +@click.option('-c', '--check', is_flag=True, help="Check the platfome pcie device") +def pcieinfo(check, verbose): + """Show Device PCIe Info""" + cmd = "pcieutil pcie_show" + if check: + cmd = "pcieutil pcie_check" + run_command(cmd, display_cmd=verbose) + +# 'firmware' subcommand ("show platform firmware") +@platform.command() +def firmware(): + """Show firmware status information""" + cmd = "fwutil show status" + run_command(cmd) + # 'fan' subcommand ("show platform fan") @platform.command() def fan(): @@ -1738,14 +1755,7 @@ def temperature(): """Show device temperature information""" cmd = 'tempershow' run_command(cmd) - -# 'firmware' subcommand ("show platform firmware") -@platform.command() -def firmware(): - """Show firmware status information""" - cmd = "fwutil show status" - run_command(cmd) - + # # 'logging' command ("show logging") # From 
9765cd0cba3d72a9380267996b2a6631c9a043e8 Mon Sep 17 00:00:00 2001 From: shlomibitton <60430976+shlomibitton@users.noreply.github.com> Date: Tue, 26 May 2020 16:45:37 +0300 Subject: [PATCH 076/111] Add 'hw-management-generate-dump.sh' to 'show techsupport' command (#923) Signed-off-by: Shlomi Bitton --- scripts/generate_dump | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/generate_dump b/scripts/generate_dump index 2611e39d92..3b669ff302 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -511,6 +511,11 @@ main() { fi done + # run 'hw-management-generate-dump.sh' script and save the result file + /usr/bin/hw-management-generate-dump.sh + save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false + rm -f /tmp/hw-mgmt-dump* + # clean up working tar dir before compressing $RM $V -rf $TARDIR From 98ae8ba9e7c088f592bc5d35b6b79dda390c24c3 Mon Sep 17 00:00:00 2001 From: byu343 Date: Thu, 28 May 2020 00:01:16 -0700 Subject: [PATCH 077/111] Revert the change to kdump reboot (#916) This is to revert the change to the reboot command in 7105400. Platform reboot is not supported by the kdump kernel, as the platform drivers are not loaded. With the existing code, there are some error messages printed, but the device is still rebooted by CPU. The change here helps to avoid the error messages. 
--- scripts/reboot | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/scripts/reboot b/scripts/reboot index 434675bdbf..6a030c15c9 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -1,5 +1,12 @@ #!/bin/bash +# Reboot immediately if we run the kdump capture kernel +VMCORE_FILE=/proc/vmcore +if [ -e $VMCORE_FILE -a -s $VMCORE_FILE ]; then + echo "We have a /proc/vmcore, then we just kdump'ed" + /sbin/reboot +fi + REBOOT_USER=$(logname) REBOOT_TIME=$(date) PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) @@ -18,18 +25,6 @@ function debug() logger "$@" } -# Reboot immediately if we run the kdump capture kernel -VMCORE_FILE=/proc/vmcore -if [ -e $VMCORE_FILE -a -s $VMCORE_FILE ]; then - debug "We have a /proc/vmcore, then we just kdump'ed" - if [ -x ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} ]; then - VERBOSE=yes debug "Rebooting with platform ${PLATFORM} specific tool ..." - exec ${DEVPATH}/${PLATFORM}/${PLAT_REBOOT} - else - /sbin/reboot - fi -fi - function stop_sonic_services() { if [[ x"$ASIC_TYPE" != x"mellanox" ]]; then From 11332d58742395c392a2fab05d76ab102a3bda5c Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Thu, 28 May 2020 11:07:30 -0700 Subject: [PATCH 078/111] Vnet alias mapping (#924) --- show/main.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/show/main.py b/show/main.py index 45a1e1c185..1a6fd80284 100755 --- a/show/main.py +++ b/show/main.py @@ -2989,6 +2989,38 @@ def tablelize(vnet_keys, vnet_data): click.echo(tabulate(tablelize(vnet_keys, vnet_data), header)) +@vnet.command() +@click.argument('vnet_alias', required=False) +def alias(vnet_alias): + """Show vnet alias to name information""" + config_db = ConfigDBConnector() + config_db.connect() + header = ['Alias', 'Name'] + + # Fetching data from config_db for VNET + vnet_data = config_db.get_table('VNET') + vnet_keys = natsorted(vnet_data.keys()) + + def tablelize(vnet_keys, vnet_data, vnet_alias): + table = 
[] + for k in vnet_keys: + r = [] + if vnet_alias is not None: + if vnet_data[k].get('guid') == vnet_alias: + r.append(vnet_data[k].get('guid')) + r.append(k) + table.append(r) + return table + else: + continue + + r.append(vnet_data[k].get('guid')) + r.append(k) + table.append(r) + return table + + click.echo(tabulate(tablelize(vnet_keys, vnet_data, vnet_alias), header)) + @vnet.command() def interfaces(): """Show vnet interfaces information""" From 861ae35bb87c702ab1777d9a9236289ed07cbedd Mon Sep 17 00:00:00 2001 From: lguohan Date: Thu, 28 May 2020 16:10:09 -0700 Subject: [PATCH 079/111] [showtech]: add knet dump information in show tech (#925) Signed-off-by: Guohan Lu --- scripts/generate_dump | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/scripts/generate_dump b/scripts/generate_dump index 3b669ff302..01e45f801a 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -361,7 +361,7 @@ main() { save_cmd "systemd-analyze blame" "systemd.analyze.blame" save_cmd "systemd-analyze dump" "systemd.analyze.dump" save_cmd "systemd-analyze plot" "systemd.analyze.plot.svg" - + save_platform "syseeprom" "platform" save_platform "psustatus" "platform" save_platform "ssdhealth" "platform" @@ -440,6 +440,12 @@ main() { save_cmd "bcmcmd -t5 version" "broadcom.version" save_cmd "bcmcmd -t5 soc" "broadcom.soc" save_cmd "bcmcmd -t5 ps" "broadcom.ps" + save_cmd "cat /proc/bcm/knet/debug" "broadcom.knet.debug" + save_cmd "cat /proc/bcm/knet/dma" "broadcom.knet.dma" + save_cmd "cat /proc/bcm/knet/dstats" "broadcom.knet.dstats" + save_cmd "cat /proc/bcm/knet/link" "broadcom.knet.link" + save_cmd "cat /proc/bcm/knet/rate" "broadcom.knet.rate" + save_cmd "cat /proc/bcm/knet/stats" "broadcom.knet.stats" save_cmd "bcmcmd \"l3 nat_ingress show\"" "broadcom.nat.ingress" save_cmd "bcmcmd \"l3 nat_egress show\"" "broadcom.nat.egress" fi @@ -512,7 +518,7 @@ main() { done # run 'hw-management-generate-dump.sh' script and save the result file - 
/usr/bin/hw-management-generate-dump.sh + /usr/bin/hw-management-generate-dump.sh save_file "/tmp/hw-mgmt-dump*" "hw-mgmt" false rm -f /tmp/hw-mgmt-dump* From 410b5737622f6eb59767445c9ae87479811a04e5 Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Fri, 29 May 2020 13:06:13 -0700 Subject: [PATCH 080/111] Changes to make lldp show command for multi-npu platforms. (#914) * Changes to make lldp show command for multi-npu platforms. We will display only front-panel port information. * Address Review Comments. * Added Comment * Fix LGTM error * Address Review Comments --- scripts/lldpshow | 116 ++++++++++++++++++++++++++++++++++++++--------- show/main.py | 4 +- 2 files changed, 97 insertions(+), 23 deletions(-) diff --git a/scripts/lldpshow b/scripts/lldpshow index f15f62f2d6..42d5fa39ac 100755 --- a/scripts/lldpshow +++ b/scripts/lldpshow @@ -25,30 +25,73 @@ import re import sys import xml.etree.ElementTree as ET from tabulate import tabulate +import argparse +import sonic_device_util +from swsssdk import ConfigDBConnector, SonicDBConfig +BACKEND_ASIC_INTERFACE_NAME_PREFIX = 'Ethernet-BP' + +LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE = '' +LLDP_INSTANCE_IN_HOST_NAMESPACE = '' +LLDP_DEFAULT_INTERFACE_LIST_IN_ASIC_NAMESPACE = '' +SPACE_TOKEN = ' ' class Lldpshow(object): def __init__(self): - self.lldpraw = None + self.lldpraw = [] self.lldpsum = {} + self.lldp_interface = [] + self.lldp_instance = [] self.err = None ### So far only find Router and Bridge two capabilities in lldpctl, so any other capacility types will be read as Other ### if further capability type is supported like WLAN, can just add the tag definition here self.ctags = {'Router': 'R', 'Bridge': 'B'} + SonicDBConfig.load_sonic_global_db_config() + + # For multi-asic platforms we will get only front-panel interface to display + namespaces = sonic_device_util.get_all_namespaces() + per_asic_configdb = {} + for instance_num, front_asic_namespaces in 
enumerate(namespaces['front_ns']): + per_asic_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_asic_configdb[front_asic_namespaces].connect() + # Initalize Interface list to be ''. We will do string append of the interfaces below. + self.lldp_interface.append(LLDP_DEFAULT_INTERFACE_LIST_IN_ASIC_NAMESPACE) + self.lldp_instance.append(instance_num) + keys = per_asic_configdb[front_asic_namespaces].get_keys("PORT") + for key in keys: + if key.startswith(BACKEND_ASIC_INTERFACE_NAME_PREFIX): + continue + self.lldp_interface[instance_num] += key + SPACE_TOKEN - def get_info(self): + # LLDP running in host namespace + self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE) + self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE) + + def get_info(self, lldp_detail_info, lldp_port): """ - use 'lldpctl -f xml' command to gather local lldp detailed information + use 'lldpctl' command to gather local lldp detailed information """ - lldp_cmd = 'lldpctl -f xml' - p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, shell=True) - (output, err) = p.communicate() - ## Wait for end of command. Get return returncode ## - returncode = p.wait() - ### if no error, get the lldpctl result - if returncode == 0: - self.lldpraw = output - else: - self.err = err + for lldp_instace_num in range(len(self.lldp_instance)): + lldp_interface_list = lldp_port if lldp_port is not None else self.lldp_interface[lldp_instace_num] + # In detail mode we will pass interface list (only front ports) and get O/P as plain text + # and in table format we will get xml output + lldp_cmd = 'sudo docker exec -it lldp{} lldpctl '.format(self.lldp_instance[lldp_instace_num]) + ('-f xml' if not lldp_detail_info else lldp_interface_list) + p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, shell=True) + (output, err) = p.communicate() + ## Wait for end of command. 
Get return returncode ## + returncode = p.wait() + ### if no error, get the lldpctl result + if returncode == 0: + # ignore the output if given port is not present + if lldp_port is not None and lldp_port not in output: + continue + self.lldpraw.append(output) + if lldp_port is not None: + break + else: + self.err = err + + if self.err: + self.lldpraw = [] def parse_cap(self, capabs): """ @@ -64,15 +107,19 @@ class Lldpshow(object): capability += 'O' return capability - def parse_info(self): + def parse_info(self, lldp_detail_info): """ Parse the lldp detailed infomation into dict """ - if self.lldpraw is not None: - neis = ET.fromstring(self.lldpraw) + if lldp_detail_info: + return + for lldpraw in self.lldpraw: + neis = ET.fromstring(lldpraw) intfs = neis.findall('interface') for intf in intfs: l_intf = intf.attrib['name'] + if l_intf.startswith(BACKEND_ASIC_INTERFACE_NAME_PREFIX): + continue self.lldpsum[l_intf] = {} chassis = intf.find('chassis') capabs = chassis.findall('capability') @@ -97,11 +144,17 @@ class Lldpshow(object): return sorted(summary, key=alphanum_key) - def display_sum(self): + def display_sum(self, lldp_detail_info): """ print out summary result of lldp neighbors """ - if self.lldpraw is not None: + # In detail mode output is plain text + if self.lldpraw and lldp_detail_info: + lldp_output = '' + for lldp_detail_output in self.lldpraw: + lldp_output += lldp_detail_output + print (lldp_output) + elif self.lldpraw: lldpstatus = [] print ('Capability codes: (R) Router, (B) Bridge, (O) Other') header = ['LocalPort', 'RemoteDevice', 'RemotePortID', 'Capability', 'RemotePortDescr'] @@ -115,11 +168,32 @@ class Lldpshow(object): print ('Error:',self.err) def main(): + parser = argparse.ArgumentParser(description='Display the LLDP neighbors', + version='1.0.0', + formatter_class=argparse.RawTextHelpFormatter, + epilog=""" + Examples: + lldpshow + lldpshow -d + lldpshow -d -p Ethernet0 + lldpshow -p Ethernet0 + """) + + parser.add_argument('-d', 
'--detail', action='store_true', help='LLDP neighbors detail information', default=False) + parser.add_argument('-p', '--port', type=str, help='LLDP neighbors detail information for given port', default=None) + args = parser.parse_args() + + lldp_detail_info = args.detail + lldp_port = args.port + + if lldp_port and not lldp_detail_info: + lldp_detail_info = True + try: lldp = Lldpshow() - lldp.get_info() - lldp.parse_info() - lldp.display_sum() + lldp.get_info(lldp_detail_info, lldp_port) + lldp.parse_info(lldp_detail_info) + lldp.display_sum(lldp_detail_info) except Exception as e: print(e.message, file=sys.stderr) sys.exit(1) diff --git a/show/main.py b/show/main.py index 1a6fd80284..f2d4df5308 100755 --- a/show/main.py +++ b/show/main.py @@ -1629,13 +1629,13 @@ def lldp(): @click.option('--verbose', is_flag=True, help="Enable verbose output") def neighbors(interfacename, verbose): """Show LLDP neighbors""" - cmd = "sudo lldpctl" + cmd = "sudo lldpshow -d" if interfacename is not None: if get_interface_mode() == "alias": interfacename = iface_alias_converter.alias_to_name(interfacename) - cmd += " {}".format(interfacename) + cmd += " -p {}".format(interfacename) run_command(cmd, display_cmd=verbose) From 05253a28c5f195d53d482efb0ecae3924cf235cf Mon Sep 17 00:00:00 2001 From: abdosi <58047199+abdosi@users.noreply.github.com> Date: Fri, 29 May 2020 18:46:07 -0700 Subject: [PATCH 081/111] Make sure db_migrator is run after all config are loaded during (#926) load_minigraph. The behaviour got changed as part of multi-npu change so make it correct again. 
--- config/main.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/config/main.py b/config/main.py index 30fc4cd5c4..22dd494854 100755 --- a/config/main.py +++ b/config/main.py @@ -908,14 +908,19 @@ def load_minigraph(no_service_restart): run_command('{} pfcwd start_default'.format(ns_cmd_prefix), display_cmd=True) run_command("{} config qos reload".format(ns_cmd_prefix), display_cmd=True) - # Write latest db version string into db - db_migrator='/usr/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) - if os.path.isfile('/etc/sonic/acl.json'): run_command("acl-loader update full /etc/sonic/acl.json", display_cmd=True) - + + # Write latest db version string into db + db_migrator='/usr/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + for namespace in namespace_list: + if namespace is DEFAULT_NAMESPACE: + cfggen_namespace_option = " " + else: + cfggen_namespace_option = " -n {}".format(namespace) + run_command(db_migrator + ' -o set_version' + cfggen_namespace_option) + # We first run "systemctl reset-failed" to remove the "failed" # status from all services before we attempt to restart them if not no_service_restart: From a88ab008b6847eabd15eba6e76510a9994dc8962 Mon Sep 17 00:00:00 2001 From: yangshiping <43633727+yangshp1987@users.noreply.github.com> Date: Wed, 3 Jun 2020 06:48:07 +0800 Subject: [PATCH 082/111] add fec config/show option (#764) Signed-off-by:yangshiping@jd.com --- config/main.py | 19 +++++++++++++++++++ scripts/intfutil | 5 ++++- sonic-utilities-tests/intfutil_test.py | 6 +++--- .../mock_tables/appl_db.json | 1 + 4 files changed, 27 insertions(+), 4 deletions(-) diff --git a/config/main.py b/config/main.py index 22dd494854..08747ca62d 100755 --- a/config/main.py +++ b/config/main.py @@ -1954,6 +1954,25 @@ def mtu(ctx, interface_name, interface_mtu, verbose): 
command += " -vv" run_command(command, display_cmd=verbose) +@interface.command() +@click.pass_context +@click.argument('interface_name', metavar='', required=True) +@click.argument('interface_fec', metavar='', required=True) +@click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") +def fec(ctx, interface_name, interface_fec, verbose): + """Set interface fec""" + if interface_fec not in ["rs", "fc", "none"]: + ctx.fail("'fec not in ['rs', 'fc', 'none']!") + if get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + command = "portconfig -p {} -f {}".format(interface_name, interface_fec) + if verbose: + command += " -vv" + run_command(command, display_cmd=verbose) + # # 'ip' subgroup ('config interface ip ...') # diff --git a/scripts/intfutil b/scripts/intfutil index bdee791122..b0efa0dd38 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -32,6 +32,7 @@ PORT_OPER_STATUS = "oper_status" PORT_ADMIN_STATUS = "admin_status" PORT_SPEED = "speed" PORT_MTU_STATUS = "mtu" +PORT_FEC = "fec" PORT_DESCRIPTION = "description" PORT_OPTICS_TYPE = "type" PORT_PFC_ASYM_STATUS = "pfc_asym" @@ -339,7 +340,7 @@ def appl_db_sub_intf_status_get(appl_db, config_db, front_panel_ports_list, port # ========================== interface-status logic ========================== -header_stat = ['Interface', 'Lanes', 'Speed', 'MTU', 'Alias', 'Vlan', 'Oper', 'Admin', 'Type', 'Asym PFC'] +header_stat = ['Interface', 'Lanes', 'Speed', 'MTU', 'FEC', 'Alias', 'Vlan', 'Oper', 'Admin', 'Type', 'Asym PFC'] header_stat_sub_intf = ['Sub port interface', 'Speed', 'MTU', 'Vlan', 'Admin', 'Type'] class IntfStatus(object): @@ -365,6 +366,7 @@ class IntfStatus(object): appl_db_port_status_get(self.appl_db, key, PORT_LANES_STATUS), appl_db_port_status_get(self.appl_db, key, PORT_SPEED), appl_db_port_status_get(self.appl_db, key, PORT_MTU_STATUS), + 
appl_db_port_status_get(self.appl_db, key, PORT_FEC), appl_db_port_status_get(self.appl_db, key, PORT_ALIAS), config_db_vlan_port_keys_get(self.combined_int_to_vlan_po_dict, self.front_panel_ports_list, key), appl_db_port_status_get(self.appl_db, key, PORT_OPER_STATUS), @@ -378,6 +380,7 @@ class IntfStatus(object): appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_LANES_STATUS, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_SPEED, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_MTU_STATUS, self.portchannel_speed_dict), + appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_FEC, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_ALIAS, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, "vlan", self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_OPER_STATUS, self.portchannel_speed_dict), diff --git a/sonic-utilities-tests/intfutil_test.py b/sonic-utilities-tests/intfutil_test.py index 44791729bb..67f6f9b6a8 100644 --- a/sonic-utilities-tests/intfutil_test.py +++ b/sonic-utilities-tests/intfutil_test.py @@ -27,9 +27,9 @@ def test_intf_status(self): result = self.runner.invoke(show.cli.commands["interfaces"].commands["status"], []) print >> sys.stderr, result.output expected_output = ( - "Interface Lanes Speed MTU Alias Vlan Oper Admin Type Asym PFC\n" - "----------- ------- ------- ----- --------- ------ ------ ------- --------------- ----------\n" - " Ethernet0 0 25G 9100 Ethernet0 routed down up QSFP28 or later off" + "Interface Lanes Speed MTU FEC Alias Vlan Oper Admin Type Asym PFC\n" + "----------- ------- ------- ----- ----- --------- ------ ------ ------- --------------- ----------\n" + " Ethernet0 0 25G 9100 rs Ethernet0 routed down up QSFP28 or later off" ) 
self.assertEqual(result.output.strip(), expected_output) diff --git a/sonic-utilities-tests/mock_tables/appl_db.json b/sonic-utilities-tests/mock_tables/appl_db.json index 19768f75a2..96c071e3a2 100644 --- a/sonic-utilities-tests/mock_tables/appl_db.json +++ b/sonic-utilities-tests/mock_tables/appl_db.json @@ -7,6 +7,7 @@ "oper_status": "down", "pfc_asym": "off", "mtu": "9100", + "fec": "rs", "admin_status": "up" }, "PORT_TABLE:Ethernet200": { From 4e1e61cf0a52a1af3c71aa576a56408d14e6e94a Mon Sep 17 00:00:00 2001 From: Steven LU <45245946+stevenlu99@users.noreply.github.com> Date: Fri, 5 Jun 2020 18:24:01 -0700 Subject: [PATCH 083/111] Add_intf_range (#913) * Add_intf_range --- config/main.py | 57 ++++++++++++++++++++------------- doc/Command-Reference.md | 31 +++++++++++++++++- scripts/intfutil | 16 ++++++--- scripts/portstat | 23 ++++++++----- show/main.py | 5 ++- utilities_common/intf_filter.py | 43 +++++++++++++++++++++++++ 6 files changed, 137 insertions(+), 38 deletions(-) create mode 100755 utilities_common/intf_filter.py diff --git a/config/main.py b/config/main.py index 08747ca62d..41963159e1 100755 --- a/config/main.py +++ b/config/main.py @@ -14,6 +14,7 @@ import ipaddress from swsssdk import ConfigDBConnector, SonicV2Connector, SonicDBConfig from minigraph import parse_device_desc_xml +from utilities_common.intf_filter import parse_interface_in_filter import aaa import mlnx @@ -1843,21 +1844,26 @@ def startup(ctx, interface_name): if interface_name is None: ctx.fail("'interface_name' is None!") - if interface_name_is_valid(interface_name) is False: - ctx.fail("Interface name is invalid. Please enter a valid interface name!!") + intf_fs = parse_interface_in_filter(interface_name) + if len(intf_fs) == 1 and interface_name_is_valid(interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") log_info("'interface startup {}' executing...".format(interface_name)) + port_dict = config_db.get_table('PORT') + for port_name in port_dict.keys(): + if port_name in intf_fs: + config_db.mod_entry("PORT", port_name, {"admin_status": "up"}) + + portchannel_list = config_db.get_table("PORTCHANNEL") + for po_name in portchannel_list.keys(): + if po_name in intf_fs: + config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "up"}) + + subport_list = config_db.get_table("VLAN_SUB_INTERFACE") + for sp_name in subport_list.keys(): + if sp_name in intf_fs: + config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "up"}) - if interface_name.startswith("Ethernet"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.mod_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "up"}) - else: - config_db.mod_entry("PORT", interface_name, {"admin_status": "up"}) - elif interface_name.startswith("PortChannel"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.mod_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "up"}) - else: - config_db.mod_entry("PORTCHANNEL", interface_name, {"admin_status": "up"}) # # 'shutdown' subcommand # @@ -1874,19 +1880,24 @@ def shutdown(ctx, interface_name): if interface_name is None: ctx.fail("'interface_name' is None!") - if interface_name_is_valid(interface_name) is False: + intf_fs = parse_interface_in_filter(interface_name) + if len(intf_fs) == 1 and interface_name_is_valid(interface_name) is False: ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!") - if interface_name.startswith("Ethernet"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.mod_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "down"}) - else: - config_db.mod_entry("PORT", interface_name, {"admin_status": "down"}) - elif interface_name.startswith("PortChannel"): - if VLAN_SUB_INTERFACE_SEPARATOR in interface_name: - config_db.mod_entry("VLAN_SUB_INTERFACE", interface_name, {"admin_status": "down"}) - else: - config_db.mod_entry("PORTCHANNEL", interface_name, {"admin_status": "down"}) + port_dict = config_db.get_table('PORT') + for port_name in port_dict.keys(): + if port_name in intf_fs: + config_db.mod_entry("PORT", port_name, {"admin_status": "down"}) + + portchannel_list = config_db.get_table("PORTCHANNEL") + for po_name in portchannel_list.keys(): + if po_name in intf_fs: + config_db.mod_entry("PORTCHANNEL", po_name, {"admin_status": "down"}) + + subport_list = config_db.get_table("VLAN_SUB_INTERFACE") + for sp_name in subport_list.keys(): + if sp_name in intf_fs: + config_db.mod_entry("VLAN_SUB_INTERFACE", sp_name, {"admin_status": "down"}) # # 'speed' subcommand diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index a6598b717e..e6bb9ba6df 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2312,7 +2312,7 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte show interfaces counters [-a|--printall] [-p|--period ] show interfaces counters errors show interfaces counters rates - show interfaces counters rif [-p|--period ] + show interfaces counters rif [-p|--period ] [-i ] ``` - Example: @@ -2327,6 +2327,13 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte Ethernet16 U 16,679,692,972 13.83 MB/s 0.27% 0 17,605 0 18,206,586,265 17.51 MB/s 0.34% 0 0 0 Ethernet20 U 47,983,339,172 35.89 MB/s 0.70% 0 2,174 0 58,986,354,359 51.83 MB/s 1.01% 0 0 0 Ethernet24 U 33,543,533,441 36.59 
MB/s 0.71% 0 1,613 0 43,066,076,370 49.92 MB/s 0.97% 0 0 0 + + admin@sonic:~$ show interfaces counters -i Ethernet4,Ethernet12-16 + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL TX_ERR TX_DRP TX_OVR + ----------- ------- --------------- ----------- --------- -------- -------- -------- --------------- ----------- --------- -------- -------- -------- + Ethernet4 U 453,838,006,636 632.97 MB/s 12.36% 0 1,636 0 388,299,875,056 529.34 MB/s 10.34% 0 0 0 + Ethernet12 U 458,052,204,029 636.84 MB/s 12.44% 0 17,614 0 388,341,776,615 527.37 MB/s 10.30% 0 0 0 + Ethernet16 U 16,679,692,972 13.83 MB/s 0.27% 0 17,605 0 18,206,586,265 17.51 MB/s 0.34% 0 0 0 ``` The "errors" subcommand is used to display the interface errors. @@ -2527,6 +2534,18 @@ This command displays some more fields such as Lanes, Speed, MTU, Type, Asymmetr Ethernet0 101,102 40G 9100 fortyGigE1/1/1 up up ``` +- Example (to only display the status for range of interfaces): + ``` + admin@sonic:~$ show interfaces status Ethernet8,Ethernet168-180 + Interface Lanes Speed MTU Alias Oper Admin Type Asym PFC + ----------- ----------------- ------- ----- --------------- ------ ------- ------ ---------- + Ethernet8 49,50,51,52 100G 9100 hundredGigE3 down down N/A N/A + Ethernet168 9,10,11,12 100G 9100 hundredGigE43 down down N/A N/A + Ethernet172 13,14,15,16 100G 9100 hundredGigE44 down down N/A N/A + Ethernet176 109,110,111,112 100G 9100 hundredGigE45 down down N/A N/A + Ethernet180 105,106,107,108 100G 9100 hundredGigE46 down down N/A N/A + ``` + **show interfaces transceiver** This command is already explained [here](#Transceivers) @@ -2720,6 +2739,11 @@ This command is used to administratively shut down either the Physical interface admin@sonic:~$ sudo config interface Ethernet63 shutdown ``` + shutdown multiple interfaces + ``` + admin@sonic:~$ sudo config interface shutdown Ethernet8,Ethernet16-20,Ethernet32 + ``` + **config interface startup (Versions >= 201904)** **config interface 
startup (Versions <= 201811)** @@ -2748,6 +2772,11 @@ This command is used for administratively bringing up the Physical interface or admin@sonic:~$ sudo config interface Ethernet63 startup ``` + startup multiple interfaces + ``` + admin@sonic:~$ sudo config interface startup Ethernet8,Ethernet16-20,Ethernet32 + ``` + **config interface speed (Versions >= 201904)** **config interface speed (Versions <= 201811)** diff --git a/scripts/intfutil b/scripts/intfutil index b0efa0dd38..f51c945936 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -8,6 +8,8 @@ from tabulate import tabulate from natsort import natsorted from swsssdk import ConfigDBConnector from pprint import pprint +from utilities_common.intf_filter import parse_interface_in_filter + import os # mock the redis for unit test purposes # @@ -345,7 +347,7 @@ header_stat_sub_intf = ['Sub port interface', 'Speed', 'MTU', 'Vlan', 'Admin', ' class IntfStatus(object): - def display_intf_status(self, appl_db_keys, front_panel_ports_list, portchannel_speed_dict, appl_db_sub_intf_keys, sub_intf_list, sub_intf_only): + def display_intf_status(self, intf_name, appl_db_keys, front_panel_ports_list, portchannel_speed_dict, appl_db_sub_intf_keys, sub_intf_list, sub_intf_only): """ Generate interface-status output """ @@ -354,6 +356,8 @@ class IntfStatus(object): table = [] key = [] + intf_fs = parse_interface_in_filter(intf_name) + # # Iterate through all the keys and append port's associated state to # the result table. 
@@ -362,7 +366,8 @@ class IntfStatus(object): for i in appl_db_keys: key = re.split(':', i, maxsplit=1)[-1].strip() if key in front_panel_ports_list: - table.append((key, + if intf_name is None or key in intf_fs: + table.append((key, appl_db_port_status_get(self.appl_db, key, PORT_LANES_STATUS), appl_db_port_status_get(self.appl_db, key, PORT_SPEED), appl_db_port_status_get(self.appl_db, key, PORT_MTU_STATUS), @@ -376,7 +381,8 @@ class IntfStatus(object): for po, value in portchannel_speed_dict.iteritems(): if po: - table.append((po, + if intf_name is None or po in intf_fs: + table.append((po, appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_LANES_STATUS, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_SPEED, self.portchannel_speed_dict), appl_db_portchannel_status_get(self.appl_db, self.config_db, po, PORT_MTU_STATUS, self.portchannel_speed_dict), @@ -434,7 +440,7 @@ class IntfStatus(object): intf_name = intf_name[:sub_intf_sep_idx] self.front_panel_ports_list = get_frontpanel_port_list(self.config_db) - appl_db_keys = appl_db_keys_get(self.appl_db, self.front_panel_ports_list, intf_name) + appl_db_keys = appl_db_keys_get(self.appl_db, self.front_panel_ports_list, None) self.int_to_vlan_dict = get_interface_vlan_dict(self.config_db) self.get_raw_po_int_configdb_info = get_raw_portchannel_info(self.config_db) self.portchannel_list = get_portchannel_list(self.get_raw_po_int_configdb_info) @@ -449,7 +455,7 @@ class IntfStatus(object): appl_db_sub_intf_keys = appl_db_sub_intf_keys_get(self.appl_db, self.sub_intf_list, sub_intf_name) if appl_db_keys is None: return - self.display_intf_status(appl_db_keys, self.front_panel_ports_list, self.portchannel_speed_dict, appl_db_sub_intf_keys, self.sub_intf_list, sub_intf_only) + self.display_intf_status(intf_name, appl_db_keys, self.front_panel_ports_list, self.portchannel_speed_dict, appl_db_sub_intf_keys, self.sub_intf_list, sub_intf_only) diff --git 
a/scripts/portstat b/scripts/portstat index 3000f5469f..42757d3677 100644 --- a/scripts/portstat +++ b/scripts/portstat @@ -18,6 +18,7 @@ from collections import namedtuple, OrderedDict from natsort import natsorted from tabulate import tabulate from utilities_common.netstat import ns_diff, ns_brate, ns_prate, ns_util, table_as_json +from utilities_common.intf_filter import parse_interface_in_filter PORT_RATE = 40 @@ -128,7 +129,7 @@ class Portstat(object): else: return STATUS_NA - def cnstat_print(self, cnstat_dict, use_json, print_all, errors_only, rates_only): + def cnstat_print(self, cnstat_dict, intf_list, use_json, print_all, errors_only, rates_only): """ Print the cnstat. """ @@ -138,7 +139,8 @@ class Portstat(object): for key, data in cnstat_dict.iteritems(): if key == 'time': continue - + if intf_list and key not in intf_list: + continue if print_all: header = header_all table.append((key, self.get_port_state(key), @@ -169,7 +171,7 @@ class Portstat(object): else: print tabulate(table, header, tablefmt='simple', stralign='right') - def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, use_json, print_all, errors_only, rates_only): + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list, use_json, print_all, errors_only, rates_only): """ Print the difference between two cnstat results. 
""" @@ -186,6 +188,8 @@ class Portstat(object): if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) + if intf_list and key not in intf_list: + continue port_speed = self.get_port_speed(key) if print_all: header = header_all @@ -306,6 +310,7 @@ Examples: parser.add_argument('-R', '--rate', action='store_true', help='Display interface rates') parser.add_argument('-t', '--tag', type=str, help='Save stats with name TAG', default=None) parser.add_argument('-p', '--period', type=int, help='Display stats over a specified period (in seconds).', default=0) + parser.add_argument('-i', '--interface', type=str, help='Display stats for interface lists.', default=None) args = parser.parse_args() save_fresh_stats = args.clear @@ -319,7 +324,7 @@ Examples: uid = str(os.getuid()) wait_time_in_seconds = args.period print_all = args.all - + intf_fs = args.interface if tag_name is not None: cnstat_file = uid + "-" + tag_name else: @@ -351,13 +356,15 @@ Examples: os.rmdir(cnstat_dir) sys.exit(0) + intf_list = parse_interface_in_filter(intf_fs) + portstat = Portstat() # The cnstat_dict just give an ordered dict of all output. cnstat_dict = portstat.get_cnstat() # Now decide what information to display if raw_stats: - portstat.cnstat_print(cnstat_dict, use_json, print_all, errors_only, rates_only) + portstat.cnstat_print(cnstat_dict, intf_list, use_json, print_all, errors_only, rates_only) sys.exit(0) # At this point, either we'll create a file or open an existing one. 
@@ -384,7 +391,7 @@ Examples: try: cnstat_cached_dict = pickle.load(open(cnstat_fqn_file, 'r')) print "Last cached time was " + str(cnstat_cached_dict.get('time')) - portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, use_json, print_all, errors_only, rates_only) + portstat.cnstat_diff_print(cnstat_dict, cnstat_cached_dict, intf_list, use_json, print_all, errors_only, rates_only) except IOError as e: print e.errno, e else: @@ -392,13 +399,13 @@ Examples: print "\nFile '%s' does not exist" % cnstat_fqn_file print "Did you run 'portstat -c -t %s' to record the counters via tag %s?\n" % (tag_name, tag_name) else: - portstat.cnstat_print(cnstat_dict, use_json, print_all, errors_only, rates_only) + portstat.cnstat_print(cnstat_dict, intf_list, use_json, print_all, errors_only, rates_only) else: #wait for the specified time and then gather the new stats and output the difference. time.sleep(wait_time_in_seconds) print "The rates are calculated within %s seconds period" % wait_time_in_seconds cnstat_new_dict = portstat.get_cnstat() - portstat.cnstat_diff_print(cnstat_new_dict, cnstat_dict, use_json, print_all, errors_only, rates_only) + portstat.cnstat_diff_print(cnstat_new_dict, cnstat_dict, intf_list, use_json, print_all, errors_only, rates_only) if __name__ == "__main__": main() diff --git a/show/main.py b/show/main.py index f2d4df5308..6fdc243da9 100755 --- a/show/main.py +++ b/show/main.py @@ -947,9 +947,10 @@ def status(interfacename, verbose): @interfaces.group(invoke_without_command=True) @click.option('-a', '--printall', is_flag=True) @click.option('-p', '--period') +@click.option('-i', '--interface') @click.option('--verbose', is_flag=True, help="Enable verbose output") @click.pass_context -def counters(ctx, verbose, period, printall): +def counters(ctx, verbose, period, interface, printall): """Show interface counters""" if ctx.invoked_subcommand is None: @@ -959,6 +960,8 @@ def counters(ctx, verbose, period, printall): cmd += " -a" if period is not 
None: cmd += " -p {}".format(period) + if interface is not None: + cmd += " -i {}".format(interface) run_command(cmd, display_cmd=verbose) diff --git a/utilities_common/intf_filter.py b/utilities_common/intf_filter.py new file mode 100755 index 0000000000..be5ddaed8f --- /dev/null +++ b/utilities_common/intf_filter.py @@ -0,0 +1,43 @@ +# Interface filtering functions + +SONIC_PORT_NAME_PREFIX = "Ethernet" +SONIC_LAG_NAME_PREFIX = "PortChannel" + +def parse_interface_in_filter(intf_filter): + intf_fs = [] + + if intf_filter is None: + return intf_fs + + fs = intf_filter.split(',') + for x in fs: + if '-' in x: + # handle range + if not x.startswith(SONIC_PORT_NAME_PREFIX) and not x.startswith(SONIC_LAG_NAME_PREFIX): + continue + if x.startswith(SONIC_PORT_NAME_PREFIX): + intf = SONIC_PORT_NAME_PREFIX + if x.startswith(SONIC_LAG_NAME_PREFIX): + intf = SONIC_LAG_NAME_PREFIX + start = x.split('-')[0].split(intf,1)[1] + end = x.split('-')[1] + + if not start.isdigit() or not end.isdigit(): + continue + for i in range(int(start), int(end)+1): + intf_fs.append(intf+str(i)) + else: + intf_fs.append(x) + + return intf_fs + +def interface_in_filter(intf, filter): + if filter is None: + return True + + intf_fs = parse_interface_in_filter(filter) + if intf in intf_fs: + return True + + return False + From 67a0c6cf1cbcfe8a0c61c2b550b72185dc82127e Mon Sep 17 00:00:00 2001 From: Vadym Hlushko <62022266+vadymhlushko-mlnx@users.noreply.github.com> Date: Tue, 9 Jun 2020 19:29:32 +0300 Subject: [PATCH 084/111] [warm-reboot]: added pre-check for ISSU file (#915) * [warm-reboot]: added pre-check for ISSU file Signed-off-by: Vadym Hlushko * [warm-reboot]: fixed problems according to PR review comments Signed-off-by: Vadym Hlushko * [warm-reboot]: added distinguish token and error code to clarify when issu_bank.txt was corrupted Signed-off-by: Vadym Hlushko * [warm-reboot]: fixed indentation problems Signed-off-by: Vadym Hlushko * [warm-reboot]: removed script interrupt when 
issu_bank.txt is broken Signed-off-by: Vadym Hlushko * [warm-reboot]: added script interrupt if issu_bank.txt is broken before call request_pre_shutdown Signed-off-by: Vadym Hlushko --- scripts/fast-reboot | 46 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 0478b3af1e..35989af3ec 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -156,6 +156,42 @@ function request_pre_shutdown() } } +function recover_issu_bank_file_instruction() +{ + debug "To recover (${ISSU_BANK_FILE}) file, do the following:" + debug "$ docker exec -it syncd sx_api_dbg_generate_dump.py" + debug "$ docker exec -it syncd cat /tmp/sdkdump | grep 'ISSU Bank'" + debug "Command above will print the VALUE of ISSU BANK - 0 or 1, use this VALUE in the next command" + debug "$ printf VALUE > /host/warmboot/issu_bank.txt" +} + +function check_issu_bank_file() +{ + ISSU_BANK_FILE=/host/warmboot/issu_bank.txt + MLNX_ISSU_BANK_BROKEN=102 + + if [[ ! -s "$ISSU_BANK_FILE" ]]; then + error "(${ISSU_BANK_FILE}) does NOT exist or empty ..." + recover_issu_bank_file_instruction + if [[ "$1" = true ]]; then + exit "${MLNX_ISSU_BANK_BROKEN}" + fi + return + fi + + issu_file_chars_count=`stat -c %s ${ISSU_BANK_FILE}`; + issu_file_content=`awk '{print $0}' ${ISSU_BANK_FILE}` + + if [[ $issu_file_chars_count != 1 ]] || + [[ "$issu_file_content" != "0" && "$issu_file_content" != "1" ]]; then + error "(${ISSU_BANK_FILE}) is broken ..." + recover_issu_bank_file_instruction + if [[ "$1" = true ]]; then + exit "${MLNX_ISSU_BANK_BROKEN}" + fi + fi +} + function wait_for_pre_shutdown_complete_or_fail() { debug "Waiting for pre-shutdown ..." 
@@ -484,10 +520,20 @@ systemctl stop swss if [[ "$REBOOT_TYPE" = "warm-reboot" || "$REBOOT_TYPE" = "fastfast-reboot" ]]; then initialize_pre_shutdown + BEFORE_PRE_SHUTDOWN=true + + if [[ "x$sonic_asic_type" == x"mellanox" ]]; then + check_issu_bank_file "$BEFORE_PRE_SHUTDOWN" + fi + request_pre_shutdown wait_for_pre_shutdown_complete_or_fail + if [[ "x$sonic_asic_type" == x"mellanox" ]]; then + check_issu_bank_file + fi + # Warm reboot: dump state to host disk if [[ "$REBOOT_TYPE" = "fastfast-reboot" ]]; then sonic-db-cli ASIC_DB FLUSHDB > /dev/null From a796edf6b4ebebb7f4cb8eec3d66b80cf34f28fd Mon Sep 17 00:00:00 2001 From: Syd Logan Date: Wed, 10 Jun 2020 16:06:52 -0700 Subject: [PATCH 085/111] [show] Add support for SONiC Gearbox Manager via new gearboxutil utility (#931) * add and modify command line utilities to support gearbox phy * added build time mock unit tests HLD is located at https://github.com/Azure/SONiC/blob/b817a12fd89520d3fd26bbc5897487928e7f6de7/doc/gearbox/gearbox_mgr_design.md Signed-off-by: syd.logan@broadcom.com --- doc/Command-Reference.md | 52 ++++ scripts/gearboxutil | 225 ++++++++++++++++++ setup.py | 1 + show/main.py | 37 +++ sonic-utilities-tests/gearbox_test.py | 51 ++++ .../mock_tables/appl_db.json | 29 ++- 6 files changed, 394 insertions(+), 1 deletion(-) create mode 100755 scripts/gearboxutil create mode 100644 sonic-utilities-tests/gearbox_test.py diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index e6bb9ba6df..9ee91a88cd 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -43,6 +43,8 @@ * [ECN](#ecn) * [ECN show commands](#ecn-show-commands) * [ECN config commands](#ecn-config-commands) +* [Gearbox](#gearbox) + * [Gearbox show commands](#gearbox-show-commands) * [Interfaces](#interfaces) * [Interface Show Commands](#interface-show-commands) * [Interface Config Commands](#interface-config-commands) @@ -2257,6 +2259,56 @@ The list of the WRED profile fields that are configurable 
is listed in the below Go Back To [Beginning of the document](#) or [Beginning of this section](#ecn) +## Gearbox + +This section explains all the Gearbox PHY show commands that are supported in SONiC. + +### Gearbox show commands +This sub-section contains the show commands that are supported for gearbox phy. + +**show gearbox interfaces status** + +This command displays information about the gearbox phy interface lanes, speeds and status. Data is displayed for both MAC side and line side of the gearbox phy + +- Usage: + ``` + show gearbox interfaces status + ``` + +- Example: + +``` +home/admin# show gearbox interfaces status + PHY Id Interface MAC Lanes MAC Lane Speed PHY Lanes PHY Lane Speed Line Lanes Line Lane Speed Oper Admin +-------- ----------- ----------- ---------------- ----------- ---------------- ------------ ----------------- ------ ------- + 1 Ethernet0 25,26,27,28 10G 200,201 20G 206 40G up up + 1 Ethernet4 29,30,31,32 10G 202,203 20G 207 40G up up + 1 Ethernet8 33,34,35,36 10G 204,205 20G 208 40G up up + + ``` + +**show gearbox phys status** + +This command displays basic information about the gearbox phys configured on the switch. + +- Usage: + ``` + show gearbox phys status + ``` + +- Example: + +``` +/home/admin# show gearbox phys status + PHY Id Name Firmware +-------- ------- ---------- + 1 sesto-1 v0.1 + + ``` + +Go Back To [Beginning of the document](#) or [Beginning of this section](#gearbox) + + ## Update Device Hostname Configuration Commands This sub-section of commands is used to change device hostname without traffic being impacted. diff --git a/scripts/gearboxutil b/scripts/gearboxutil new file mode 100755 index 0000000000..b6b2818dee --- /dev/null +++ b/scripts/gearboxutil @@ -0,0 +1,225 @@ +#! 
/usr/bin/python + +import swsssdk +import sys +from tabulate import tabulate +from natsort import natsorted + +import os + +# mock the redis for unit test purposes # +try: + if os.environ["UTILITIES_UNIT_TESTING"] == "1": + modules_path = os.path.join(os.path.dirname(__file__), "..") + tests_path = os.path.join(modules_path, "sonic-utilities-tests") + sys.path.insert(0, modules_path) + sys.path.insert(0, tests_path) + import mock_tables.dbconnector + client = mock_tables.dbconnector.redis.StrictRedis() + if client.keys() is None: + raise Exception("Invalid mock_table keys") +except KeyError: + pass + +# ========================== Common gearbox-utils logic ========================== + +GEARBOX_TABLE_PHY_PREFIX = "_GEARBOX_TABLE:phy:{}" +GEARBOX_TABLE_INTERFACE_PREFIX = "_GEARBOX_TABLE:interface:{}" +GEARBOX_TABLE_PORT_PREFIX = "_GEARBOX_TABLE:phy:{}:ports:{}" + +PORT_TABLE_ETHERNET_PREFIX = "PORT_TABLE:{}" + +PHY_NAME = "name" +PHY_ID = "phy_id" +PHY_FIRMWARE_MAJOR_VERSION = "firmware_major_version" +PHY_LINE_LANES = "line_lanes" +PHY_SYSTEM_LANES = "system_lanes" + +PORT_OPER_STATUS = "oper_status" +PORT_ADMIN_STATUS = "admin_status" +PORT_SYSTEM_SPEED = "system_speed" +PORT_LINE_SPEED = "line_speed" + +INTF_NAME = "name" +INTF_LANES = "lanes" +INTF_SPEED = "speed" + +def get_appl_key_attr(db, key, attr, lane_count=1): + """ + Get APPL_DB key attribute + """ + + val = db.get(db.APPL_DB, key, attr) + if val is None: + return "N/A" + + if "speed" in attr: + if val == "0": + return "N/A" + + speed = int(val[:-3]) + + if (speed % lane_count == 0): + speed = speed // lane_count + else: + return "N/A" + + val = '{}G'.format(str(speed)) + + return val + +def db_connect_appl(): + appl_db = swsssdk.SonicV2Connector(host='127.0.0.1') + if appl_db is None: + return None + appl_db.connect(appl_db.APPL_DB) + return appl_db + +def db_connect_state(): + """ + Connect to REDIS STATE DB and get optics info + """ + state_db = swsssdk.SonicV2Connector(host='127.0.0.1') + if state_db 
is None: + return None + state_db.connect(state_db.STATE_DB, False) # Make one attempt only + return state_db + +def appl_db_keys_get(appl_db): + """ + Get APPL_DB Keys + """ + return appl_db.keys(appl_db.APPL_DB, GEARBOX_TABLE_PHY_PREFIX.format("*")) + +def appl_db_interface_keys_get(appl_db): + """ + Get APPL_DB Keys + """ + return appl_db.keys(appl_db.APPL_DB, GEARBOX_TABLE_INTERFACE_PREFIX.format("*")) + +# ========================== phy-status logic ========================== + +phy_header_status = ['PHY Id', 'Name', 'Firmware'] + +class PhyStatus(object): + + def display_phy_status(self, appl_db_keys): + """ + Generate phy status output + """ + table = [] + key = [] + + for key in appl_db_keys: + if 'lanes' in key or 'ports' in key: + continue + list_items = key.split(':') + phy_id = list_items[2] + data_row = ( + phy_id, + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_PHY_PREFIX.format(phy_id), PHY_NAME), + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_PHY_PREFIX.format(phy_id), PHY_FIRMWARE_MAJOR_VERSION)) + table.append(data_row) + + # Sorting and tabulating the result table. 
+ sorted_table = natsorted(table) + print tabulate(sorted_table, phy_header_status, tablefmt="simple", stralign='right') + + def __init__(self): + self.appl_db = db_connect_appl() + if self.appl_db is None: + return + + appl_db_keys = appl_db_keys_get(self.appl_db) + if appl_db_keys is None: + return + + self.display_phy_status(appl_db_keys) + +# ========================== interface-status logic ========================== + +intf_header_status = ['PHY Id', 'Interface', 'MAC Lanes', 'MAC Lane Speed', 'PHY Lanes', 'PHY Lane Speed', 'Line Lanes', 'Line Lane Speed', 'Oper', 'Admin'] + +class InterfaceStatus(object): + + def display_intf_status(self, appl_db_keys): + """ + Generate phy status output + """ + table = [] + key = [] + + for key in appl_db_keys: + list_items = key.split(':') + index = list_items[2] + + name = get_appl_key_attr(self.appl_db, GEARBOX_TABLE_INTERFACE_PREFIX.format(index), INTF_NAME), + name = name[0] + + mac_lanes = get_appl_key_attr(self.appl_db, PORT_TABLE_ETHERNET_PREFIX.format(name), INTF_LANES) + lanes = mac_lanes.split(',') + lane_count = 0 + for lane in lanes: + lane_count += 1 + + phy_id = get_appl_key_attr(self.appl_db, GEARBOX_TABLE_INTERFACE_PREFIX.format(index), PHY_ID) + + data_row = ( + phy_id, + name, + mac_lanes, + get_appl_key_attr(self.appl_db, PORT_TABLE_ETHERNET_PREFIX.format(name), INTF_SPEED, lane_count), + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_INTERFACE_PREFIX.format(index), PHY_SYSTEM_LANES), + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_PORT_PREFIX.format(phy_id, index), PORT_SYSTEM_SPEED), + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_INTERFACE_PREFIX.format(index), PHY_LINE_LANES), + get_appl_key_attr(self.appl_db, GEARBOX_TABLE_PORT_PREFIX.format(phy_id, index), PORT_LINE_SPEED), + get_appl_key_attr(self.appl_db, PORT_TABLE_ETHERNET_PREFIX.format(name), PORT_OPER_STATUS), + get_appl_key_attr(self.appl_db, PORT_TABLE_ETHERNET_PREFIX.format(name), PORT_ADMIN_STATUS)) + + table.append(data_row) + + # Sorting 
and tabulating the result table. + sorted_table = natsorted(table) + print tabulate(sorted_table, intf_header_status, tablefmt="simple", stralign='right') + + def __init__(self): + self.appl_db = db_connect_appl() + if self.appl_db is None: + return + + appl_db_keys = appl_db_interface_keys_get(self.appl_db) + if appl_db_keys is None: + return + + self.display_intf_status(appl_db_keys) + +def main(args): + """ + phy status + interfaces status + interfaces counters + """ + + if len(args) == 0: + print "No valid arguments provided" + return + + cmd1 = args[0] + if cmd1 != "phys" and cmd1 != "interfaces": + print "No valid command provided" + return + + cmd2 = args[1] + if cmd2 != "status" and cmd2 != "counters": + print "No valid command provided" + return + + if cmd1 == "phys" and cmd2 == "status": + PhyStatus() + elif cmd1 == "interfaces" and cmd2 == "status": + InterfaceStatus() + + sys.exit(0) + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/setup.py b/setup.py index 09e893e3f3..edffa77cd0 100644 --- a/setup.py +++ b/setup.py @@ -76,6 +76,7 @@ 'scripts/fdbclear', 'scripts/fdbshow', 'scripts/filter_fdb_entries.py', + 'scripts/gearboxutil', 'scripts/generate_dump', 'scripts/intfutil', 'scripts/intfstat', diff --git a/show/main.py b/show/main.py index 6fdc243da9..981673910d 100755 --- a/show/main.py +++ b/show/main.py @@ -2842,6 +2842,43 @@ def pool(verbose): cmd = "sudo natconfig -p" run_command(cmd, display_cmd=verbose) +# Define GEARBOX commands only if GEARBOX is configured +app_db = SonicV2Connector(host='127.0.0.1') +app_db.connect(app_db.APPL_DB) +if app_db.keys(app_db.APPL_DB, '_GEARBOX_TABLE:phy:*'): + + @cli.group(cls=AliasedGroup) + def gearbox(): + """Show gearbox info""" + pass + + # 'phys' subcommand ("show gearbox phys") + @gearbox.group(cls=AliasedGroup) + def phys(): + """Show external PHY information""" + pass + + # 'status' subcommand ("show gearbox phys status") + @phys.command() + @click.pass_context + def status(ctx): + """Show 
gearbox phys status""" + run_command("gearboxutil phys status") + return + + # 'interfaces' subcommand ("show gearbox interfaces") + @gearbox.group(cls=AliasedGroup) + def interfaces(): + """Show gearbox interfaces information""" + pass + + # 'status' subcommand ("show gearbox interfaces status") + @interfaces.command() + @click.pass_context + def status(ctx): + """Show gearbox interfaces status""" + run_command("gearboxutil interfaces status") + return # 'bindings' subcommand ("show nat config bindings") @config.command() diff --git a/sonic-utilities-tests/gearbox_test.py b/sonic-utilities-tests/gearbox_test.py new file mode 100644 index 0000000000..2d0cef041e --- /dev/null +++ b/sonic-utilities-tests/gearbox_test.py @@ -0,0 +1,51 @@ +import sys +import os +from click.testing import CliRunner +from unittest import TestCase + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import mock_tables.dbconnector # required by sonic-utilities-tests + +import show.main as show + +class TestGearbox(TestCase): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def setUp(self): + self.runner = CliRunner() + + def test_gearbox_phys_status_validation(self): + result = self.runner.invoke(show.cli.commands["gearbox"].commands["phys"].commands["status"], []) + print >> sys.stderr, result.output + expected_output = ( + "PHY Id Name Firmware\n" + "-------- ------- ----------\n" + " 1 sesto-1 v0.2\n" + " 2 sesto-2 v0.3" + ) + self.assertEqual(result.output.strip(), expected_output) + + def test_gearbox_interfaces_status_validation(self): + result = self.runner.invoke(show.cli.commands["gearbox"].commands["interfaces"].commands["status"], []) + print >> sys.stderr, result.output + expected_output = ( + "PHY Id 
Interface MAC Lanes MAC Lane Speed PHY Lanes PHY Lane Speed Line Lanes Line Lane Speed Oper Admin\n" + "-------- ----------- --------------- ---------------- --------------- ---------------- ------------ ----------------- ------ -------\n" + " 1 Ethernet200 200,201,202,203 25G 300,301,302,303 25G 304,305 50G down up" + ) + self.assertEqual(result.output.strip(), expected_output) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" diff --git a/sonic-utilities-tests/mock_tables/appl_db.json b/sonic-utilities-tests/mock_tables/appl_db.json index 96c071e3a2..4239cf949d 100644 --- a/sonic-utilities-tests/mock_tables/appl_db.json +++ b/sonic-utilities-tests/mock_tables/appl_db.json @@ -1,5 +1,6 @@ { "PORT_TABLE:Ethernet0": { + "index": "0", "lanes": "0", "alias": "Ethernet0", "description": "ARISTA01T2:Ethernet1", @@ -11,6 +12,7 @@ "admin_status": "up" }, "PORT_TABLE:Ethernet200": { + "index": "200", "lanes": "200,201,202,203", "alias": "Ethernet200", "description": "Ethernet200", @@ -18,9 +20,34 @@ "oper_status": "down", "fec": "rs", "mtu": "9100", - "pfc_asym": "off" + "pfc_asym": "off", + "admin_status": "up" }, "INTF_TABLE:Ethernet0.10": { "admin_status": "up" + }, + "_GEARBOX_TABLE:phy:1": { + "name": "sesto-1", + "phy_id": "1", + "phy_oid": "0x21000000000002", + "firmware_major_version": "v0.2" + }, + "_GEARBOX_TABLE:phy:2": { + "name": "sesto-2", + "phy_id": "2", + "phy_oid": "0x21000000000003", + "firmware_major_version": "v0.3" + }, + "_GEARBOX_TABLE:interface:200": { + "name": "Ethernet200", + "index": "200", + "line_lanes": "304,305", + "phy_id": "1", + "system_lanes": "300,301,302,303" + }, + "_GEARBOX_TABLE:phy:1:ports:200": { + "index": "200", + "line_speed": "50000", + "system_speed": "25000" } } From 40c4884d2800b8e7192d30a2e6858c0d8cf06d6e Mon Sep 17 00:00:00 2001 From: Sangita Maity Date: Thu, 11 Jun 2020 16:17:06 
-0700 Subject: [PATCH 086/111] [sfputil] Add support of platform.json (#767) Signed-off-by: Sangita Maity > This PR is dependent on [sonic-platform-common/pull/72](https://github.com/Azure/sonic-platform-common/pull/72) All three PRs are necessary to run `show interfaces transceiver` command. 1. [Azure/sonic-buildimage#3912](https://github.com/Azure/sonic-buildimage/pull/3912) 2. [Azure/sonic-platform-common#72](https://github.com/Azure/sonic-platform-common/pull/72) 3. [Azure/sonic-utilities#767](https://github.com/Azure/sonic-utilities/pull/767) **- What I did** Add support of platform.json in sfputil to get correct output of `show interfaces transceiver` **- How to verify it** Check whether all the below-mentioned CLI's are working correctly. ``` Usage: show interfaces transceiver [OPTIONS] COMMAND [ARGS]... Show SFP Transceiver information Options: -?, -h, --help Show this message and exit. Commands: eeprom Show interface transceiver EEPROM information lpmode Show interface transceiver low-power mode status presence Show interface transceiver presence ``` --- sfputil/main.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sfputil/main.py b/sfputil/main.py index abfadae94e..c70c4486b1 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -30,7 +30,8 @@ # Global platform-specific sfputil class instance platform_sfputil = None - +PLATFORM_JSON = 'platform.json' +PORT_CONFIG_INI = 'port_config.ini' # ========================== Syslog wrappers ========================== @@ -325,14 +326,13 @@ def get_path_to_port_config_file(): hwsku_path = "/".join([platform_path, hwsku]) # First check for the presence of the new 'port_config.ini' file - port_config_file_path = "/".join([hwsku_path, "port_config.ini"]) + port_config_file_path = "/".join([platform_path, PLATFORM_JSON]) if not os.path.isfile(port_config_file_path): - # port_config.ini doesn't exist. 
Try loading the legacy 'portmap.ini' file - port_config_file_path = "/".join([hwsku_path, "portmap.ini"]) + # platform.json doesn't exist. Try loading the legacy 'port_config.ini' file + port_config_file_path = "/".join([hwsku_path, PORT_CONFIG_INI]) return port_config_file_path - # Loads platform specific sfputil module from source def load_platform_sfputil(): global platform_sfputil From b23accf302b142124a4cc16156de409dd18cccef Mon Sep 17 00:00:00 2001 From: Andriy Kokhan <43479230+akokhan@users.noreply.github.com> Date: Thu, 11 Jun 2020 17:21:47 -0700 Subject: [PATCH 087/111] Fixed fast-reboot for BFN platform (#871) Signed-off-by: Andriy Kokhan --- scripts/fast-reboot | 3 --- 1 file changed, 3 deletions(-) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 35989af3ec..d251246776 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -363,9 +363,6 @@ case "$REBOOT_TYPE" in # source mlnx-ffb.sh file with # functions to check ISSU upgrade possibility source mlnx-ffb.sh - elif [[ "$sonic_asic_type" == "barefoot" ]]; then - REBOOT_TYPE="fastfast-reboot" - BOOT_TYPE_ARG="fastfast" else BOOT_TYPE_ARG="warm" fi From e5c9f4cbd30a56458d5e77bd84d5fbd621400bfd Mon Sep 17 00:00:00 2001 From: Volodymyr Samotiy Date: Fri, 12 Jun 2020 22:39:54 +0300 Subject: [PATCH 088/111] [crm] Use swsssdk API instead of redis-cli for getting keys from redis DB (#943) --- crm/main.py | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/crm/main.py b/crm/main.py index 223029d721..241874e6e8 100644 --- a/crm/main.py +++ b/crm/main.py @@ -3,7 +3,6 @@ import click import swsssdk from tabulate import tabulate -from subprocess import Popen, PIPE class Crm: def __init__(self): @@ -123,15 +122,9 @@ def show_acl_table_resources(self): header = ("Table ID", "Resource Name", "Used Count", "Available Count") # Retrieve all ACL table keys from CRM:ACL_TABLE_STATS - # TODO - # Volodymyr is working on refactoring codes to access redis database via redis-py or 
swsssdk - # we should avoid using 'keys' operation via redis-cli or sonic-db-cli - # there would be an issue when KEY in database contains space or '\n' - # for loop on the non-tty 'keys' output will take the space or `\n` as seperator when parsing the element - proc = Popen("docker exec -i database redis-cli --raw -n 2 KEYS *CRM:ACL_TABLE_STATS*", stdout=PIPE, stderr=PIPE, shell=True) - out, err = proc.communicate() - - for key in out.splitlines() or [None]: + crm_acl_keys = countersdb.keys(countersdb.COUNTERS_DB, 'CRM:ACL_TABLE_STATS*') + + for key in crm_acl_keys or [None]: data = [] if key: From a1504700c99b6d42d9b3917ecf809022795f3d34 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Fri, 12 Jun 2020 16:15:27 -0700 Subject: [PATCH 089/111] [config] Don't attempt to restart disabled services (#944) When restarting services, don't attempt to restart a service if it is disabled/masked or the `systemctl restart` command will fail, causing the calling command (e.g., `config load`, `config reload`, `config load_minigraph`) to error out. 
--- config/main.py | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index 41963159e1..731043a668 100755 --- a/config/main.py +++ b/config/main.py @@ -497,6 +497,32 @@ def _abort_if_false(ctx, param, value): if not value: ctx.abort() + +def _get_disabled_services_list(): + disabled_services_list = [] + + config_db = ConfigDBConnector() + config_db.connect() + feature_table = config_db.get_table('FEATURE') + if feature_table is not None: + for feature_name in feature_table.keys(): + if not feature_name: + log_warning("Feature is None") + continue + + status = feature_table[feature_name]['status'] + if not status: + log_warning("Status of feature '{}' is None".format(feature_name)) + continue + + if status == "disabled": + disabled_services_list.append(feature_name) + else: + log_warning("Unable to retreive FEATURE table") + + return disabled_services_list + + def _stop_services(): # on Mellanox platform pmon is stopped by syncd services_to_stop = [ @@ -514,6 +540,7 @@ def _stop_services(): execute_systemctl(services_to_stop, SYSTEMCTL_ACTION_STOP) + def _reset_failed_services(): services_to_reset = [ 'bgp', @@ -534,8 +561,8 @@ def _reset_failed_services(): 'sflow', 'restapi' ] - execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED) + execute_systemctl(services_to_reset, SYSTEMCTL_ACTION_RESET_FAILED) def _restart_services(): @@ -555,6 +582,12 @@ def _restart_services(): 'restapi' ] + disable_services = _get_disabled_services_list() + + for service in disable_services: + if service in services_to_restart: + services_to_restart.remove(service) + if asic_type == 'mellanox' and 'pmon' in services_to_restart: services_to_restart.remove('pmon') From 3715ff8ef4e4e4a53726639bd9512f0541ff0afe Mon Sep 17 00:00:00 2001 From: madhanmellanox <62459540+madhanmellanox@users.noreply.github.com> Date: Mon, 15 Jun 2020 02:34:16 -0700 Subject: [PATCH 090/111] handled the priority of ACL 
rules to be case insensitive (#918) * handled the priority of ACL rules to be case insensitive * addressed code review comments of keyword Priority combinations and added a separate test case Co-authored-by: Madhan Babu --- acl_loader/main.py | 6 +++++- scripts/aclshow | 6 +++++- sonic-utilities-tests/aclshow_test.py | 18 +++++++++++++++++- .../mock_tables/config_db.json | 5 +++++ .../mock_tables/counters_db.json | 4 ++++ 5 files changed, 36 insertions(+), 3 deletions(-) diff --git a/acl_loader/main.py b/acl_loader/main.py index f9201846b3..7b7e480f6b 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -762,7 +762,11 @@ def show_rule(self, table_name, rule_id): header = ("Table", "Rule", "Priority", "Action", "Match") def pop_priority(val): - priority = val.pop("PRIORITY") + priority = "N/A" + for key in dict(val): + if (key.upper() == "PRIORITY"): + priority = val.pop(key) + return priority return priority def pop_action(val): diff --git a/scripts/aclshow b/scripts/aclshow index acfa2e12a8..22c263d405 100755 --- a/scripts/aclshow +++ b/scripts/aclshow @@ -169,8 +169,12 @@ class AclStat(object): self.get_counter_value(rule_key, 'packets') == 'N/A'): continue rule = self.acl_rules[rule_key] + rule_priority = -1 + for key,val in rule.items(): + if key.upper() == "PRIORITY": + rule_priority = val line = [rule_key[1], rule_key[0], - rule['PRIORITY'], + rule_priority, self.get_counter_value(rule_key, 'packets'), self.get_counter_value(rule_key, 'bytes')] aclstat.append(line) diff --git a/sonic-utilities-tests/aclshow_test.py b/sonic-utilities-tests/aclshow_test.py index f93d2ba32c..6ff09f18cf 100644 --- a/sonic-utilities-tests/aclshow_test.py +++ b/sonic-utilities-tests/aclshow_test.py @@ -24,6 +24,7 @@ RULE_4 DATAACL 9996 401 400 RULE_7 DATAACL 9993 701 700 RULE_9 DATAACL 9991 901 900 +RULE_10 DATAACL 9989 1001 1000 DEFAULT_RULE DATAACL 1 2 1 RULE_6 EVERFLOW 9994 601 600 """ @@ -39,6 +40,7 @@ RULE_05 DATAACL 9995 0 0 RULE_7 DATAACL 9993 701 700 RULE_9 DATAACL 
9991 901 900 +RULE_10 DATAACL 9989 1001 1000 DEFAULT_RULE DATAACL 1 2 1 RULE_6 EVERFLOW 9994 601 600 RULE_08 EVERFLOW 9992 0 0 @@ -51,6 +53,13 @@ RULE_1 DATAACL 9999 101 100 """ +# Expected output for aclshow -r RULE_1 -t DATAACL +rule10_dataacl_output = '' + \ +"""RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT +----------- ------------ ------ --------------- ------------- +RULE_10 DATAACL 9989 1001 1000 +""" + # Expected output for aclshow -a -r RULE_05 rule05_all_output = ''+ \ """RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT @@ -68,7 +77,7 @@ rule4_rule6_verbose_output = '' + \ """Reading ACL info... Total number of ACL Tables: 5 -Total number of ACL Rules: 10 +Total number of ACL Rules: 11 RULE NAME TABLE NAME PRIO PACKETS COUNT BYTES COUNT ----------- ------------ ------ --------------- ------------- @@ -93,6 +102,7 @@ RULE_4 DATAACL 9996 401 400 RULE_7 DATAACL 9993 701 700 RULE_9 DATAACL 9991 901 900 +RULE_10 DATAACL 9989 1001 1000 DEFAULT_RULE DATAACL 1 2 1 """ @@ -111,6 +121,7 @@ RULE_05 DATAACL 9995 0 0 RULE_7 DATAACL 9993 0 0 RULE_9 DATAACL 9991 0 0 +RULE_10 DATAACL 9989 0 0 DEFAULT_RULE DATAACL 1 0 0 RULE_6 EVERFLOW 9994 0 0 RULE_08 EVERFLOW 9992 0 0 @@ -184,6 +195,11 @@ def test_rule0(): test = Aclshow(all = None, clear = None, rules = 'RULE_0', tables = None, verbose = None) assert test.result.getvalue() == rule0_output +# aclshow -r RULE_10 -t DATAACL +def test_rule10_lowercase_priority(): + test = Aclshow(all = None, clear = None, rules = 'RULE_10', tables = 'DATAACL', verbose = None) + assert test.result.getvalue() == rule10_dataacl_output + # aclshow -r RULE_4,RULE_6 -vv def test_rule4_rule6_verbose(): test = Aclshow(all = None, clear = None, rules = 'RULE_4,RULE_6', tables = None, verbose = True) diff --git a/sonic-utilities-tests/mock_tables/config_db.json b/sonic-utilities-tests/mock_tables/config_db.json index 42ba9b5c61..3061c3015e 100644 --- a/sonic-utilities-tests/mock_tables/config_db.json +++ 
b/sonic-utilities-tests/mock_tables/config_db.json @@ -93,6 +93,11 @@ "L4_DST_PORT": "4661", "PACKET_ACTION": "FORWARD", "PRIORITY": "9991" + }, + "ACL_RULE|DATAACL|RULE_10": { + "PACKET_ACTION": "DROP", + "priority": "9989", + "SRC_IP": "10.0.0.3/32" }, "ACL_TABLE|DATAACL": { "policy_desc": "DATAACL", diff --git a/sonic-utilities-tests/mock_tables/counters_db.json b/sonic-utilities-tests/mock_tables/counters_db.json index c475841193..2476837d71 100644 --- a/sonic-utilities-tests/mock_tables/counters_db.json +++ b/sonic-utilities-tests/mock_tables/counters_db.json @@ -115,6 +115,10 @@ "Bytes": "900", "Packets": "901" }, + "COUNTERS:DATAACL:RULE_10": { + "Bytes": "1000", + "Packets": "1001" + }, "COUNTERS:oid:0x1000000000002": { "SAI_PORT_STAT_IF_IN_ERRORS": "10", "SAI_PORT_STAT_IF_IN_DISCARDS": "100", From 219fea1e92624446ca303133a56e9f2b7241f2cf Mon Sep 17 00:00:00 2001 From: Nazarii Hnydyn Date: Mon, 15 Jun 2020 14:25:34 +0300 Subject: [PATCH 091/111] [fwutil]: Update fwutil to v2.0.0.0. 
(#941) Signed-off-by: Nazarii Hnydyn --- data/etc/bash_completion.d/fwutil | 3 + fwutil/lib.py | 312 ++++++++++++++---------------- fwutil/log.py | 12 +- fwutil/main.py | 210 +++++++++++++++----- show/main.py | 27 ++- 5 files changed, 333 insertions(+), 231 deletions(-) diff --git a/data/etc/bash_completion.d/fwutil b/data/etc/bash_completion.d/fwutil index 60ec589a6a..7974889b49 100644 --- a/data/etc/bash_completion.d/fwutil +++ b/data/etc/bash_completion.d/fwutil @@ -1,7 +1,10 @@ +shopt -s extglob + _fwutil_completion() { COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ COMP_CWORD=$COMP_CWORD \ _FWUTIL_COMPLETE=complete $1 ) ) + COMPREPLY=( ${COMPREPLY[*]//*(-install|-update)/} ) return 0 } diff --git a/fwutil/lib.py b/fwutil/lib.py index 897ead6cc8..9bfc229276 100755 --- a/fwutil/lib.py +++ b/fwutil/lib.py @@ -6,13 +6,16 @@ # try: - import click import os + import time import json import socket import urllib import subprocess + + import click import sonic_device_util + from collections import OrderedDict from urlparse import urlparse from tabulate import tabulate @@ -52,7 +55,8 @@ class URL(object): def __init__(self, url): self.__url = url self.__pb = None - self.__bytes_num = 0 + self.__pb_bytes_num = 0 + self.__pb_force_show = True def __str__(self): return self.__url @@ -68,15 +72,20 @@ def __reporthook(self, count, block_size, total_size): width=self.PB_FULL_TERMINAL_WIDTH ) - self.__pb.update(count * block_size - self.__bytes_num) - self.__bytes_num = count * block_size + self.__pb.update(count * block_size - self.__pb_bytes_num) + self.__pb_bytes_num = count * block_size + + if self.__pb_force_show: + time.sleep(1) + self.__pb_force_show = False def __pb_reset(self): if self.__pb: self.__pb.render_finish() self.__pb = None - self.__bytes_num = 0 + self.__pb_bytes_num = 0 + self.__pb_force_show = True def __validate(self): # Check basic URL syntax @@ -110,10 +119,6 @@ def retrieve(self): result = urlparse(self.__url) basename = 
os.path.basename(result.path) - name, extension = os.path.splitext(basename) - - if not extension: - raise RuntimeError("Filename is malformed: did not find an extension") default_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(self.DOWNLOAD_TIMEOUT) @@ -124,7 +129,7 @@ def retrieve(self): self.DOWNLOAD_PATH_TEMPLATE.format(basename), self.__reporthook ) - except Exception: + except: if os.path.exists(self.DOWNLOAD_PATH_TEMPLATE.format(basename)): os.remove(self.DOWNLOAD_PATH_TEMPLATE.format(basename)) raise @@ -283,7 +288,6 @@ class PlatformComponentsParser(object): COMPONENT_KEY = "component" FIRMWARE_KEY = "firmware" VERSION_KEY = "version" - INFO_KEY = "info" UTF8_ENCODING = "utf-8" @@ -344,18 +348,15 @@ def __parse_component_section(self, section, component, is_module_component=Fals self.__module_component_map[section][key1] = OrderedDict() if value1: - if len(value1) != 3: + if len(value1) != 1 and len(value1) != 2: self.__parser_component_fail("unexpected number of records: key={}".format(key1)) if self.FIRMWARE_KEY not in value1: missing_key = self.FIRMWARE_KEY break - elif self.VERSION_KEY not in value1: + elif len(value1) == 2 and self.VERSION_KEY not in value1: missing_key = self.VERSION_KEY break - elif self.INFO_KEY not in value1: - missing_key = self.INFO_KEY - break for key2, value2 in value1.items(): if not self.__is_str(value2): @@ -485,12 +486,9 @@ class ComponentUpdateProvider(PlatformDataProvider): """ ComponentUpdateProvider """ - STATUS_HEADER = [ "Chassis", "Module", "Component", "Firmware", "Version", "Status", "Info" ] - RESULT_HEADER = [ "Chassis", "Module", "Component", "Status" ] + STATUS_HEADER = [ "Chassis", "Module", "Component", "Firmware", "Version (Current/Available)", "Status" ] FORMAT = "simple" - FW_STATUS_UPDATE_SUCCESS = "success" - FW_STATUS_UPDATE_FAILURE = "failure" FW_STATUS_UPDATE_REQUIRED = "update is required" FW_STATUS_UP_TO_DATE = "up-to-date" @@ -545,7 +543,7 @@ def __validate_platform_schema(self, 
pcp): pcp.module_component_map ) - def get_status(self, force): + def get_status(self): status_table = [ ] append_chassis_name = self.is_chassis_has_components() @@ -556,42 +554,45 @@ def get_status(self, force): for chassis_component_name, chassis_component in chassis_component_map.items(): component = self.__pcp.chassis_component_map[chassis_name][chassis_component_name] - firmware_path = NA - firmware_version_current = chassis_component.get_firmware_version() - firmware_version = firmware_version_current - - status = self.FW_STATUS_UP_TO_DATE - info = NA - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] - firmware_version_available = component[self.__pcp.VERSION_KEY] - firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) - info = component[self.__pcp.INFO_KEY] if self.__root_path is not None: firmware_path = self.__root_path + firmware_path - if force or firmware_version_current != firmware_version_available: + firmware_version_current = chassis_component.get_firmware_version() + + if self.__pcp.VERSION_KEY in component: + firmware_version_available = component[self.__pcp.VERSION_KEY] + else: + firmware_version_available = chassis_component.get_available_firmware_version(firmware_path) + + if self.__root_path is not None: + firmware_path = component[self.__pcp.FIRMWARE_KEY] + + firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) + + if firmware_version_current != firmware_version_available: status = self.FW_STATUS_UPDATE_REQUIRED + else: + status = self.FW_STATUS_UP_TO_DATE - status_table.append( - [ - chassis_name if append_chassis_name else EMPTY, - module_name if append_module_na else EMPTY, - chassis_component_name, - firmware_path, - firmware_version, - status, - info - ] - ) + status_table.append( + [ + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_na else EMPTY, + chassis_component_name, + firmware_path, + firmware_version, + status 
+ ] + ) - if append_chassis_name: - append_chassis_name = False + if append_chassis_name: + append_chassis_name = False - if append_module_na: - append_module_na = False + if append_module_na: + append_module_na = False append_chassis_name = not self.is_chassis_has_components() chassis_name = self.chassis.get_name() @@ -603,168 +604,137 @@ def get_status(self, force): for module_component_name, module_component in module_component_map.items(): component = self.__pcp.module_component_map[module_name][module_component_name] - firmware_path = NA - firmware_version_current = module_component.get_firmware_version() - firmware_version = firmware_version_current - - status = self.FW_STATUS_UP_TO_DATE - info = NA - if component: firmware_path = component[self.__pcp.FIRMWARE_KEY] - firmware_version_available = component[self.__pcp.VERSION_KEY] - firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) - info = component[self.__pcp.INFO_KEY] if self.__root_path is not None: firmware_path = self.__root_path + firmware_path - if force or firmware_version_current != firmware_version_available: - status = self.FW_STATUS_UPDATE_REQUIRED + firmware_version_current = module_component.get_firmware_version() - status_table.append( - [ - chassis_name if append_chassis_name else EMPTY, - module_name if append_module_name else EMPTY, - module_component_name, - firmware_path, - firmware_version, - status, - info - ] - ) + if self.__pcp.VERSION_KEY in component: + firmware_version_available = component[self.__pcp.VERSION_KEY] + else: + firmware_version_available = module_component.get_available_firmware_version(firmware_path) - if append_chassis_name: - append_chassis_name = False + if self.__root_path is not None: + firmware_path = component[self.__pcp.FIRMWARE_KEY] - if append_module_name: - append_module_name = False + firmware_version = "{} / {}".format(firmware_version_current, firmware_version_available) - return tabulate(status_table, 
self.STATUS_HEADER, tablefmt=self.FORMAT) + if firmware_version_current != firmware_version_available: + status = self.FW_STATUS_UPDATE_REQUIRED + else: + status = self.FW_STATUS_UP_TO_DATE - def update_firmware(self, force): - status_table = [ ] + status_table.append( + [ + chassis_name if append_chassis_name else EMPTY, + module_name if append_module_name else EMPTY, + module_component_name, + firmware_path, + firmware_version, + status + ] + ) - append_chassis_name = self.is_chassis_has_components() - append_module_na = not self.is_modular_chassis() - module_name = NA + if append_chassis_name: + append_chassis_name = False - for chassis_name, chassis_component_map in self.chassis_component_map.items(): - for chassis_component_name, chassis_component in chassis_component_map.items(): - component = self.__pcp.chassis_component_map[chassis_name][chassis_component_name] - component_path = "{}/{}".format( - chassis_name, - chassis_component_name - ) + if append_module_name: + append_module_name = False - firmware_version_current = chassis_component.get_firmware_version() + if not status_table: + return None - status = self.FW_STATUS_UP_TO_DATE + return tabulate(status_table, self.STATUS_HEADER, tablefmt=self.FORMAT) - if component: - firmware_path = component[self.__pcp.FIRMWARE_KEY] - firmware_version_available = component[self.__pcp.VERSION_KEY] + def get_notification(self, chassis_name, module_name, component_name): + if self.is_modular_chassis(): + component = self.module_component_map[module_name][component_name] + parser = self.__pcp.module_component_map[module_name][component_name] + else: + component = self.chassis_component_map[chassis_name][component_name] + parser = self.__pcp.chassis_component_map[chassis_name][component_name] - if self.__root_path is not None: - firmware_path = self.__root_path + firmware_path + if not parser: + return None - if force or firmware_version_current != firmware_version_available: - result = False + firmware_path = 
parser[self.__pcp.FIRMWARE_KEY] - try: - click.echo("Installing firmware:") - click.echo(TAB + firmware_path) + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path - log_helper.log_fw_install_start(component_path, firmware_path) + return component.get_firmware_update_notification(firmware_path) - if not os.path.exists(firmware_path): - raise RuntimeError("Path \"{}\" does not exist".format(firmware_path)) + def update_firmware(self, chassis_name, module_name, component_name): + if self.is_modular_chassis(): + component = self.module_component_map[module_name][component_name] + parser = self.__pcp.module_component_map[module_name][component_name] - result = chassis_component.install_firmware(firmware_path) - log_helper.log_fw_install_end(component_path, firmware_path, result) - except Exception as e: - log_helper.log_fw_install_end(component_path, firmware_path, False, e) - log_helper.print_error(str(e)) + component_path = "{}/{}/{}".format(chassis_name, module_name, component_name) + else: + component = self.chassis_component_map[chassis_name][component_name] + parser = self.__pcp.chassis_component_map[chassis_name][component_name] - status = self.FW_STATUS_UPDATE_SUCCESS if result else self.FW_STATUS_UPDATE_FAILURE + component_path = "{}/{}".format(chassis_name, component_name) - status_table.append( - [ - chassis_name if append_chassis_name else EMPTY, - module_name if append_module_na else EMPTY, - chassis_component_name, - status, - ] - ) + if not parser: + return - if append_chassis_name: - append_chassis_name = False + firmware_path = parser[self.__pcp.FIRMWARE_KEY] - if append_module_na: - append_module_na = False + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path - append_chassis_name = not self.is_chassis_has_components() - chassis_name = self.chassis.get_name() + try: + click.echo("Updating firmware:") + click.echo(TAB + firmware_path) + log_helper.log_fw_update_start(component_path, 
firmware_path) + component.update_firmware(firmware_path) + log_helper.log_fw_update_end(component_path, firmware_path, True) + except KeyboardInterrupt: + log_helper.log_fw_update_end(component_path, firmware_path, False, "Keyboard interrupt") + raise + except Exception as e: + log_helper.log_fw_update_end(component_path, firmware_path, False, e) + raise + def is_firmware_update_available(self, chassis_name, module_name, component_name): if self.is_modular_chassis(): - for module_name, module_component_map in self.module_component_map.items(): - append_module_name = True - - for module_component_name, module_component in module_component_map.items(): - component = self.__pcp.module_component_map[module_name][module_component_name] - component_path = "{}/{}/{}".format( - self.chassis.get_name(), - module_name, - module_component_name - ) - - firmware_version_current = module_component.get_firmware_version() - - status = self.FW_STATUS_UP_TO_DATE - - if component: - firmware_path = component[self.__pcp.FIRMWARE_KEY] - firmware_version_available = component[self.__pcp.VERSION_KEY] - - if self.__root_path is not None: - firmware_path = self.__root_path + firmware_path - - if force or firmware_version_current != firmware_version_available: - result = False + component = self.__pcp.module_component_map[module_name][component_name] + else: + component = self.__pcp.chassis_component_map[chassis_name][component_name] - try: - click.echo("Installing firmware:") - click.echo(TAB + firmware_path) + if not component: + return False - log_helper.log_fw_install_start(component_path, firmware_path) + return True - if not os.path.exists(firmware_path): - raise RuntimeError("Path \"{}\" does not exist".format(firmware_path)) + def is_firmware_update_required(self, chassis_name, module_name, component_name): + if self.is_modular_chassis(): + component = self.module_component_map[module_name][component_name] + parser = self.__pcp.module_component_map[module_name][component_name] + 
else: + component = self.chassis_component_map[chassis_name][component_name] + parser = self.__pcp.chassis_component_map[chassis_name][component_name] - result = module_component.install_firmware(firmware_path) - log_helper.log_fw_install_end(component_path, firmware_path, result) - except Exception as e: - log_helper.log_fw_install_end(component_path, firmware_path, False, e) - log_helper.print_error(str(e)) + if not parser: + return False - status = self.FW_STATUS_UPDATE_SUCCESS if result else self.FW_STATUS_UPDATE_FAILURE + firmware_path = parser[self.__pcp.FIRMWARE_KEY] - status_table.append( - [ - chassis_name if append_chassis_name else EMPTY, - module_name if append_module_name else EMPTY, - module_component_name, - status, - ] - ) + if self.__root_path is not None: + firmware_path = self.__root_path + firmware_path - if append_chassis_name: - append_chassis_name = False + firmware_version_current = component.get_firmware_version() - if append_module_name: - append_module_name = False + if self.__pcp.VERSION_KEY in parser: + firmware_version_available = parser[self.__pcp.VERSION_KEY] + else: + firmware_version_available = component.get_available_firmware_version(firmware_path) - return tabulate(status_table, self.RESULT_HEADER, tablefmt=self.FORMAT) + return firmware_version_current != firmware_version_available class ComponentStatusProvider(PlatformDataProvider): diff --git a/fwutil/log.py b/fwutil/log.py index a686c437ef..69d60a28f5 100755 --- a/fwutil/log.py +++ b/fwutil/log.py @@ -6,8 +6,8 @@ # try: - import click import syslog + import click except ImportError as e: raise ImportError("Required module not found: {}".format(str(e))) @@ -58,6 +58,7 @@ class LogHelper(object): """ FW_ACTION_DOWNLOAD = "download" FW_ACTION_INSTALL = "install" + FW_ACTION_UPDATE = "update" STATUS_SUCCESS = "success" STATUS_FAILURE = "failure" @@ -122,8 +123,17 @@ def log_fw_install_start(self, component, firmware): def log_fw_install_end(self, component, firmware, status, 
exception=None): self.__log_fw_action_end(self.FW_ACTION_INSTALL, component, firmware, status, exception) + def log_fw_update_start(self, component, firmware): + self.__log_fw_action_start(self.FW_ACTION_UPDATE, component, firmware) + + def log_fw_update_end(self, component, firmware, status, exception=None): + self.__log_fw_action_end(self.FW_ACTION_UPDATE, component, firmware, status, exception) + def print_error(self, msg): click.echo("Error: {}.".format(msg)) def print_warning(self, msg): click.echo("Warning: {}.".format(msg)) + + def print_info(self, msg): + click.echo("Info: {}.".format(msg)) diff --git a/fwutil/main.py b/fwutil/main.py index c320929cc5..703a5fa9f3 100755 --- a/fwutil/main.py +++ b/fwutil/main.py @@ -6,8 +6,9 @@ # try: - import click import os + import click + from lib import PlatformDataProvider, ComponentStatusProvider, ComponentUpdateProvider from lib import URL, SquashFs from log import LogHelper @@ -16,7 +17,7 @@ # ========================= Constants ========================================== -VERSION = '1.0.0.0' +VERSION = '2.0.0.0' CHASSIS_NAME_CTX_KEY = "chassis_name" MODULE_NAME_CTX_KEY = "module_name" @@ -51,6 +52,11 @@ def cli_abort(ctx, msg): ctx.abort() +def cli_exit(ctx, msg): + log_helper.print_info(msg) + ctx.exit(EXIT_SUCCESS) + + def cli_init(ctx): if os.geteuid() != ROOT_UID: cli_abort(ctx, "Root privileges are required") @@ -64,7 +70,6 @@ def cli_init(ctx): @click.pass_context def cli(ctx): """fwutil - Command-line utility for interacting with platform components""" - cli_init(ctx) @@ -76,13 +81,39 @@ def install(ctx): ctx.obj[COMPONENT_PATH_CTX_KEY] = [ ] +# 'update' group +@cli.group() +@click.pass_context +def update(ctx): + """Update platform firmware""" + ctx.obj[COMPONENT_PATH_CTX_KEY] = [ ] + + +def chassis_handler(ctx): + ctx.obj[CHASSIS_NAME_CTX_KEY] = pdp.chassis.get_name() + ctx.obj[COMPONENT_PATH_CTX_KEY].append(pdp.chassis.get_name()) + + # 'chassis' subgroup @click.group() @click.pass_context -def 
chassis(ctx): +def chassis_install(ctx): """Install chassis firmware""" - ctx.obj[CHASSIS_NAME_CTX_KEY] = pdp.chassis.get_name() + chassis_handler(ctx) + + +# 'chassis' subgroup +@click.group() +@click.pass_context +def chassis_update(ctx): + """Update chassis firmware""" + chassis_handler(ctx) + + +def module_handler(ctx, module_name): + ctx.obj[MODULE_NAME_CTX_KEY] = module_name ctx.obj[COMPONENT_PATH_CTX_KEY].append(pdp.chassis.get_name()) + ctx.obj[COMPONENT_PATH_CTX_KEY].append(module_name) def validate_module(ctx, param, value): @@ -102,11 +133,22 @@ def validate_module(ctx, param, value): @click.group() @click.argument('module_name', metavar='', callback=validate_module) @click.pass_context -def module(ctx, module_name): +def module_install(ctx, module_name): """Install module firmware""" - ctx.obj[MODULE_NAME_CTX_KEY] = module_name - ctx.obj[COMPONENT_PATH_CTX_KEY].append(pdp.chassis.get_name()) - ctx.obj[COMPONENT_PATH_CTX_KEY].append(module_name) + module_handler(ctx, module_name) + + +# 'module' subgroup +@click.group() +@click.argument('module_name', metavar='', callback=validate_module) +@click.pass_context +def module_update(ctx, module_name): + """Update module firmware""" + module_handler(ctx, module_name) + + +def component_handler(ctx, component_name): + ctx.obj[COMPONENT_PATH_CTX_KEY].append(component_name) def validate_component(ctx, param, value): @@ -132,9 +174,18 @@ def validate_component(ctx, param, value): @click.group() @click.argument('component_name', metavar='', callback=validate_component) @click.pass_context -def component(ctx, component_name): +def component_install(ctx, component_name): """Install component firmware""" - ctx.obj[COMPONENT_PATH_CTX_KEY].append(component_name) + component_handler(ctx, component_name) + + +# 'component' subgroup +@click.group() +@click.argument('component_name', metavar='', callback=validate_component) +@click.pass_context +def component_update(ctx, component_name): + """Update component firmware""" + 
component_handler(ctx, component_name) def install_fw(ctx, fw_path): @@ -149,6 +200,9 @@ def install_fw(ctx, fw_path): log_helper.log_fw_install_start(component_path, fw_path) status = component.install_firmware(fw_path) log_helper.log_fw_install_end(component_path, fw_path, status) + except KeyboardInterrupt: + log_helper.log_fw_install_end(component_path, fw_path, False, "Keyboard interrupt") + raise except Exception as e: log_helper.log_fw_install_end(component_path, fw_path, False, e) cli_abort(ctx, str(e)) @@ -168,6 +222,9 @@ def download_fw(ctx, url): log_helper.log_fw_download_start(component_path, str(url)) filename, headers = url.retrieve() log_helper.log_fw_download_end(component_path, str(url), True) + except KeyboardInterrupt: + log_helper.log_fw_download_end(component_path, str(url), False, "Keyboard interrupt") + raise except Exception as e: log_helper.log_fw_download_end(component_path, str(url), False, e) cli_abort(ctx, str(e)) @@ -191,37 +248,50 @@ def validate_fw(ctx, param, value): # 'fw' subcommand -@component.command() +@component_install.command(name='fw') @click.option('-y', '--yes', 'yes', is_flag=True, show_default=True, help="Assume \"yes\" as answer to all prompts and run non-interactively") @click.argument('fw_path', metavar='', callback=validate_fw) @click.pass_context -def fw(ctx, yes, fw_path): - """Install firmware from local binary or URL""" - if not yes: - click.confirm("New firmware will be installed, continue?", abort=True) - +def fw_install(ctx, yes, fw_path): + """Install firmware from local path or URL""" url = None - if URL_CTX_KEY in ctx.obj: - url = ctx.obj[URL_CTX_KEY] - fw_path = download_fw(ctx, url) - try: + if URL_CTX_KEY in ctx.obj: + url = ctx.obj[URL_CTX_KEY] + fw_path = download_fw(ctx, url) + + component = ctx.obj[COMPONENT_CTX_KEY] + + notification = component.get_firmware_update_notification(fw_path) + if notification: + log_helper.print_warning(notification) + + if not yes: + click.confirm("New firmware will be 
installed, continue?", abort=True) + install_fw(ctx, fw_path) finally: if url is not None and os.path.exists(fw_path): os.remove(fw_path) -# 'update' subgroup -@cli.command() +# 'fw' subcommand +@component_update.command(name='fw') @click.option('-y', '--yes', 'yes', is_flag=True, show_default=True, help="Assume \"yes\" as answer to all prompts and run non-interactively") -@click.option('-f', '--force', 'force', is_flag=True, show_default=True, help="Install firmware regardless the current version") -@click.option('-i', '--image', 'image', type=click.Choice(["current", "next"]), default="current", show_default=True, help="Update firmware using current/next image") +@click.option('-f', '--force', 'force', is_flag=True, show_default=True, help="Update firmware regardless the current version") +@click.option('-i', '--image', 'image', type=click.Choice(["current", "next"]), default="current", show_default=True, help="Update firmware using current/next SONiC image") @click.pass_context -def update(ctx, yes, force, image): - """Update platform firmware""" - aborted = False +def fw_update(ctx, yes, force, image): + """Update firmware from SONiC image""" + if CHASSIS_NAME_CTX_KEY in ctx.obj: + chassis_name = ctx.obj[CHASSIS_NAME_CTX_KEY] + module_name = None + elif MODULE_NAME_CTX_KEY in ctx.obj: + chassis_name = pdp.chassis.get_name() + module_name = ctx.obj[MODULE_NAME_CTX_KEY] + + component_name = ctx.obj[COMPONENT_CTX_KEY].get_name() try: squashfs = None @@ -239,32 +309,30 @@ def update(ctx, yes, force, image): else: cup = ComponentUpdateProvider() - click.echo(cup.get_status(force)) + if not cup.is_firmware_update_available(chassis_name, module_name, component_name): + cli_exit(ctx, "Firmware update is not available") - if not yes: - click.confirm("New firmware will be installed, continue?", abort=True) - - result = cup.update_firmware(force) + if not (cup.is_firmware_update_required(chassis_name, module_name, component_name) or force): + cli_exit(ctx, "Firmware is 
up-to-date") - click.echo() - click.echo("Summary:") - click.echo() + notification = cup.get_notification(chassis_name, module_name, component_name) + if notification: + log_helper.print_warning(notification) - click.echo(result) - except click.Abort: - aborted = True - except Exception as e: - aborted = True - click.echo("Error: " + str(e) + ". Aborting...") + if not yes: + click.confirm("New firmware will be installed, continue?", abort=True) - if image == IMAGE_NEXT and squashfs is not None: - squashfs.umount_next_image_fs() + cup.update_firmware(chassis_name, module_name, component_name) + finally: + if squashfs is not None: + squashfs.umount_next_image_fs() + except click.exceptions.Abort: + ctx.abort() + except click.exceptions.Exit as e: + ctx.exit(e.exit_code) except Exception as e: cli_abort(ctx, str(e)) - if aborted: - ctx.abort() - # 'show' subgroup @cli.group() @@ -273,6 +341,40 @@ def show(): pass +# 'updates' subcommand +@show.command() +@click.option('-i', '--image', 'image', type=click.Choice(["current", "next"]), default="current", show_default=True, help="Show updates using current/next SONiC image") +@click.pass_context +def updates(ctx, image): + """Show available updates""" + try: + squashfs = None + + try: + if image == IMAGE_NEXT: + squashfs = SquashFs() + + if squashfs.is_next_boot_set(): + fs_path = squashfs.mount_next_image_fs() + cup = ComponentUpdateProvider(fs_path) + else: + log_helper.print_warning("Next boot is set to current: fallback to defaults") + cup = ComponentUpdateProvider() + else: + cup = ComponentUpdateProvider() + + status = cup.get_status() + if status is not None: + click.echo(status) + else: + log_helper.print_info("Firmware updates are not available") + finally: + if squashfs is not None: + squashfs.umount_next_image_fs() + except Exception as e: + cli_abort(ctx, str(e)) + + # 'status' subcommand @show.command() @click.pass_context @@ -291,11 +393,17 @@ def version(): """Show utility version""" click.echo("fwutil 
version {0}".format(VERSION)) -install.add_command(chassis) -install.add_command(module) +install.add_command(chassis_install, name='chassis') +install.add_command(module_install, name='module') + +update.add_command(chassis_update, name='chassis') +update.add_command(module_update, name='module') + +chassis_install.add_command(component_install, name='component') +module_install.add_command(component_install, name='component') -chassis.add_command(component) -module.add_command(component) +chassis_update.add_command(component_update, name='component') +module_update.add_command(component_update, name='component') # ========================= CLI entrypoint ===================================== diff --git a/show/main.py b/show/main.py index 981673910d..abae54490c 100755 --- a/show/main.py +++ b/show/main.py @@ -1738,13 +1738,6 @@ def pcieinfo(check, verbose): cmd = "pcieutil pcie_check" run_command(cmd, display_cmd=verbose) -# 'firmware' subcommand ("show platform firmware") -@platform.command() -def firmware(): - """Show firmware status information""" - cmd = "fwutil show status" - run_command(cmd) - # 'fan' subcommand ("show platform fan") @platform.command() def fan(): @@ -1758,7 +1751,25 @@ def temperature(): """Show device temperature information""" cmd = 'tempershow' run_command(cmd) - + +# 'firmware' subcommand ("show platform firmware") +@platform.command( + context_settings=dict( + ignore_unknown_options=True, + allow_extra_args=True + ), + add_help_option=False +) +@click.argument('args', nargs=-1, type=click.UNPROCESSED) +def firmware(args): + """Show firmware information""" + cmd = "fwutil show {}".format(" ".join(args)) + + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError as e: + sys.exit(e.returncode) + # # 'logging' command ("show logging") # From 5fe40b6451178917568aed5c08660ac32d3364c8 Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Sun, 21 Jun 2020 12:28:34 -0700 Subject: [PATCH 092/111] Fix for command. 
show interface transceiver eeprom -d Ethernet Make sure key is present in dom_info_dict and then only parse else skip. --- scripts/sfpshow | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 01970b3191..2d12774d44 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -141,7 +141,7 @@ class SFPShow(object): ident = ' ' seperator = ": " for key in sorted_key_table: - if dom_info_dict is not None and dom_info_dict[key] != 'N/A': + if dom_info_dict is not None and key in dom_info_dict and dom_info_dict[key] != 'N/A': current_val = (ident + ident + dom_value_map[key]) current_val = (current_val + seperator.rjust(len(seperator) + From fce546d86c0374b5fb090b4ba0970c3a8796801e Mon Sep 17 00:00:00 2001 From: "ruijie.com.cn" Date: Wed, 24 Jun 2020 13:26:12 +0800 Subject: [PATCH 093/111] [master]fix #4716 show ipv6 interfaces neighbor_ip is N/A issue (#948) Signed-off-by: tim-rj --- show/main.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/show/main.py b/show/main.py index abae54490c..6b0f6f8484 100755 --- a/show/main.py +++ b/show/main.py @@ -1546,6 +1546,7 @@ def interfaces(): if netifaces.AF_INET6 in ipaddresses: ifaddresses = [] + neighbor_info = [] for ipaddr in ipaddresses[netifaces.AF_INET6]: neighbor_name = 'N/A' neighbor_ip = 'N/A' @@ -1557,6 +1558,7 @@ def interfaces(): neighbor_ip = bgp_peer[local_ip][1] except Exception: pass + neighbor_info.append([neighbor_name, neighbor_ip]) if len(ifaddresses) > 0: admin = get_if_admin_state(iface) @@ -1567,9 +1569,11 @@ def interfaces(): master = get_if_master(iface) if get_interface_mode() == "alias": iface = iface_alias_converter.name_to_alias(iface) - data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_name, neighbor_ip]) - for ifaddr in ifaddresses[1:]: - data.append(["", "", ifaddr[1], ""]) + data.append([iface, master, ifaddresses[0][1], admin + "/" + oper, neighbor_info[0][0], neighbor_info[0][1]]) + 
neighbor_info.pop(0) + for ifaddr in ifaddresses[1:]: + data.append(["", "", ifaddr[1], admin + "/" + oper, neighbor_info[0][0], neighbor_info[0][1]]) + neighbor_info.pop(0) print tabulate(data, header, tablefmt="simple", stralign='left', missingval="") From 38bfdc62ae0178f89b8127f7c00e33495a455b87 Mon Sep 17 00:00:00 2001 From: Tamer Ahmed Date: Wed, 24 Jun 2020 09:58:28 -0700 Subject: [PATCH 094/111] [filter-fdb] Check VLAN Presence When Filter FDB (#957) * [filter-fdb] Check VLAN Presence When Filter FDB FTOS fast conversion script generates bogus vlan that does not exist. This PR uses config_db in order to verify that provided vlans exist in the switch configuration. signed-off-by: Tamer Ahmed * review comments making lgtm happy Added two more test cases * Update existing test case and adding new one * adding support for filter ou based on vlan ip network --- scripts/fast-reboot | 3 +- scripts/filter_fdb_entries.py | 52 +- .../filter_fdb_entries_test.py | 11 +- .../filter_fdb_input/config_db.json | 2517 +++++++++++++++++ .../filter_fdb_input/test_vectors.py | 203 ++ 5 files changed, 2775 insertions(+), 11 deletions(-) create mode 100644 sonic-utilities-tests/filter_fdb_input/config_db.json diff --git a/scripts/fast-reboot b/scripts/fast-reboot index d251246776..0d874e7f14 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -415,6 +415,7 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then # Dump the ARP and FDB tables to files also as default routes for both IPv4 and IPv6 # into /host/fast-reboot DUMP_DIR=/host/fast-reboot + CONFIG_DB_FILE=/etc/sonic/config_db.json mkdir -p $DUMP_DIR FAST_REBOOT_DUMP_RC=0 /usr/bin/fast-reboot-dump.py -t $DUMP_DIR || FAST_REBOOT_DUMP_RC=$? @@ -426,7 +427,7 @@ if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then FILTER_FDB_ENTRIES_RC=0 # Filter FDB entries using MAC addresses from ARP table - /usr/bin/filter_fdb_entries.py -f $DUMP_DIR/fdb.json -a $DUMP_DIR/arp.json || FILTER_FDB_ENTRIES_RC=$? 
+ /usr/bin/filter_fdb_entries.py -f $DUMP_DIR/fdb.json -a $DUMP_DIR/arp.json -c $CONFIG_DB_FILE || FILTER_FDB_ENTRIES_RC=$? if [[ FILTER_FDB_ENTRIES_RC -ne 0 ]]; then error "Failed to filter FDb entries. Exit code: $FILTER_FDB_ENTRIES_RC" unload_kernel diff --git a/scripts/filter_fdb_entries.py b/scripts/filter_fdb_entries.py index 1efe30ebe4..d7f93d3e1e 100755 --- a/scripts/filter_fdb_entries.py +++ b/scripts/filter_fdb_entries.py @@ -8,9 +8,36 @@ import traceback import time +from ipaddress import ip_address, ip_network, ip_interface from collections import defaultdict -def get_arp_entries_map(filename): +def get_vlan_cidr_map(filename): + """ + Generate Vlan CIDR information from Config DB file + + fdb entries could be contaminated with foreign Vlan entries as seen in the case of + FTOS fast conversion. SONiC Vlan CIDR configuration will be used to filter out + those invalid Vlan entries. + + Args: + filename(str): Config DB data file + + Returns: + vlan_cidr(dict) map of Vlan CIDR configuration for SONiC device + """ + with open(filename, 'r') as fp: + config_db_entries = json.load(fp) + + vlan_cidr = defaultdict() + if "VLAN_INTERFACE" in config_db_entries.keys() and "VLAN" in config_db_entries.keys(): + for vlan_key in config_db_entries["VLAN_INTERFACE"].keys(): + vlan, cidr = tuple(vlan_key.split('|')) + if vlan in config_db_entries["VLAN"]: + vlan_cidr[vlan] = ip_interface(cidr).network + + return vlan_cidr + +def get_arp_entries_map(arp_filename, config_db_filename): """ Generate map for ARP entries @@ -18,23 +45,30 @@ to match FDB table formatting Args: - filename(str): ARP entry file name + arp_filename(str): ARP entry file name + config_db_filename(str): Config DB file name Returns: arp_map(dict) map of ARP entries using MAC as key.
""" - with open(filename, 'r') as fp: + vlan_cidr = get_vlan_cidr_map(config_db_filename) + + with open(arp_filename, 'r') as fp: arp_entries = json.load(fp) arp_map = defaultdict() for arp in arp_entries: for key, config in arp.items(): - if 'NEIGH_TABLE' in key: + if "NEIGH_TABLE" not in key: + continue + table, vlan, ip = tuple(key.split(':')) + if "NEIGH_TABLE" in table and vlan in vlan_cidr.keys() \ + and ip_address(ip) in ip_network(vlan_cidr[vlan]) and "neigh" in config.keys(): arp_map[config["neigh"].replace(':', '-')] = "" return arp_map -def filter_fdb_entries(fdb_filename, arp_filename, backup_file): +def filter_fdb_entries(fdb_filename, arp_filename, config_db_filename, backup_file): """ Filter FDB entries based on MAC presence into ARP entries @@ -44,12 +78,13 @@ def filter_fdb_entries(fdb_filename, arp_filename, backup_file): Args: fdb_filename(str): FDB entries file name arp_filename(str): ARP entry file name + config_db_filename(str): Config DB file name backup_file(bool): Create backup copy of FDB file before creating new one Returns: None """ - arp_map = get_arp_entries_map(arp_filename) + arp_map = get_arp_entries_map(arp_filename, config_db_filename) with open(fdb_filename, 'r') as fp: fdb_entries = json.load(fp) @@ -91,20 +126,23 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('-f', '--fdb', type=str, default='/tmp/fdb.json', help='fdb file name') parser.add_argument('-a', '--arp', type=str, default='/tmp/arp.json', help='arp file name') + parser.add_argument('-c', '--config_db', type=str, default='/tmp/config_db.json', help='config db file name') parser.add_argument('-b', '--backup_file', type=bool, default=True, help='Back up old fdb entries file') args = parser.parse_args() fdb_filename = args.fdb arp_filename = args.arp + config_db_filename = args.config_db backup_file = args.backup_file try: file_exists_or_raise(fdb_filename) file_exists_or_raise(arp_filename) + file_exists_or_raise(config_db_filename) except 
Exception as e: syslog.syslog(syslog.LOG_ERR, "Got an exception %s: Traceback: %s" % (str(e), traceback.format_exc())) else: - filter_fdb_entries(fdb_filename, arp_filename, backup_file) + filter_fdb_entries(fdb_filename, arp_filename, config_db_filename, backup_file) return 0 diff --git a/sonic-utilities-tests/filter_fdb_entries_test.py b/sonic-utilities-tests/filter_fdb_entries_test.py index 22abeb1f28..af1f7712c3 100644 --- a/sonic-utilities-tests/filter_fdb_entries_test.py +++ b/sonic-utilities-tests/filter_fdb_entries_test.py @@ -14,6 +14,7 @@ class TestFilterFdbEntries(object): """ ARP_FILENAME = "/tmp/arp.json" FDB_FILENAME = "/tmp/fdb.json" + CONFIG_DB_FILENAME = "/tmp/config_db.json" EXPECTED_FDB_FILENAME = "/tmp/expected_fdb.json" def __setUp(self, testData): @@ -45,16 +46,17 @@ def create_file_or_raise(data, filename): Raises: Exception if data type is not supported """ - if isinstance(data, list): + if isinstance(data, list) or isinstance(data, dict): with open(filename, 'w') as fp: json.dump(data, fp, indent=2, separators=(',', ': ')) elif isinstance(data, str): shutil.copyfile(data, filename) else: - raise Exception("Unknown test data type: {0}".format(type(test_data))) + raise Exception("Unknown test data type: {0}".format(type(data))) create_file_or_raise(testData["arp"], self.ARP_FILENAME) create_file_or_raise(testData["fdb"], self.FDB_FILENAME) + create_file_or_raise(testData["config_db"], self.CONFIG_DB_FILENAME) create_file_or_raise(testData["expected_fdb"], self.EXPECTED_FDB_FILENAME) def __tearDown(self): @@ -72,6 +74,7 @@ def __tearDown(self): fdbFiles = glob.glob(self.FDB_FILENAME + '*') for file in fdbFiles: os.remove(file) + os.remove(self.CONFIG_DB_FILENAME) def __runCommand(self, cmds): """ @@ -166,8 +169,10 @@ def testFilterFdbEntries(self, testData): self.ARP_FILENAME, "-f", self.FDB_FILENAME, + "-c", + self.CONFIG_DB_FILENAME, ]) - assert rc == 0, "CFilter_fbd_entries.py failed with '{0}'".format(stderr) + assert rc == 0, 
"Filter_fdb_entries.py failed with '{0}'".format(stderr) assert self.__verifyOutput(), "Test failed for test data: {0}".format(testData) finally: self.__tearDown() diff --git a/sonic-utilities-tests/filter_fdb_input/config_db.json b/sonic-utilities-tests/filter_fdb_input/config_db.json new file mode 100644 index 0000000000..8c34fcc5b6 --- /dev/null +++ b/sonic-utilities-tests/filter_fdb_input/config_db.json @@ -0,0 +1,2517 @@ +{ + "NTP_SERVER": { + "10.20.8.129": {}, + "10.20.8.130": {} + }, + "TACPLUS_SERVER": { + "100.127.20.21": { + "priority": "1", + "tcp_port": "49" + } + }, + "DEVICE_METADATA": { + "localhost": { + "hwsku": "Force10-S6000", + "default_bgp_status": "down", + "type": "ToRRouter", + "hostname": "str-s6000-acs-14", + "platform": "x86_64-dell_s6000_s1220-r0", + "mac": "f4:8e:38:16:bc:8d", + "default_pfcwd_status": "enable", + "bgp_asn": "65100", + "deployment_id": "1", + "docker_routing_config_mode": "unified" + } + }, + "BGP_PEER_RANGE": { + "BGPSLBPassive": { + "src_address": "10.1.0.32", + "name": "BGPSLBPassive", + "ip_range": [ + "10.255.0.0/25" + ] + }, + "BGPVac": { + "src_address": "10.1.0.32", + "name": "BGPVac", + "ip_range": [ + "192.168.0.0/21" + ] + } + }, + "VLAN": { + "Vlan1000": { + "dhcp_servers": [ + "192.0.0.1", + "192.0.0.2", + "192.0.0.3", + "192.0.0.4", + "192.0.0.5", + "192.0.0.6", + "192.0.0.7", + "192.0.0.8", + "192.0.0.9", + "192.0.0.10", + "192.0.0.11", + "192.0.0.12", + "192.0.0.13", + "192.0.0.14", + "192.0.0.15", + "192.0.0.16", + "192.0.0.17", + "192.0.0.18", + "192.0.0.19", + "192.0.0.20", + "192.0.0.21", + "192.0.0.22", + "192.0.0.23", + "192.0.0.24", + "192.0.0.25", + "192.0.0.26", + "192.0.0.27", + "192.0.0.28", + "192.0.0.29", + "192.0.0.30", + "192.0.0.31", + "192.0.0.32", + "192.0.0.33", + "192.0.0.34", + "192.0.0.35", + "192.0.0.36", + "192.0.0.37", + "192.0.0.38", + "192.0.0.39", + "192.0.0.40", + "192.0.0.41", + "192.0.0.42", + "192.0.0.43", + "192.0.0.44", + "192.0.0.45", + "192.0.0.46", + "192.0.0.47", + 
"192.0.0.48" + ], + "vlanid": "1000" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "1": "1", + "0": "0", + "3": "3", + "2": "2", + "5": "5", + "4": "4", + "7": "7", + "6": "6" + } + }, + "QUEUE": { + "Ethernet4|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet4|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet4|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet4|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet8|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet8|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet8|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet12|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet12|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet12|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|1": { + "scheduler": 
"[SCHEDULER|scheduler.0]" + }, + "Ethernet16|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet16|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet16|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet16|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet20|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet20|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet20|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet24|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet24|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet24|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet28|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": 
"[SCHEDULER|scheduler.1]" + }, + "Ethernet28|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet28|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet32|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet32|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet32|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet36|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet36|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet36|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet40|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet40|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet40|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + 
"Ethernet44|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet44|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet44|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet44|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet48|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet48|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet48|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet52|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet52|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet52|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet56|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + 
"Ethernet56|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet56|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet60|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet60|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet60|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet64|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet64|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet64|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet68|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet68|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet68|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|2": { + "scheduler": 
"[SCHEDULER|scheduler.0]" + }, + "Ethernet72|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet72|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet72|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet72|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet76|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet76|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet76|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet80|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet80|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet80|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet84|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet84|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet84|5": { + "scheduler": 
"[SCHEDULER|scheduler.0]" + }, + "Ethernet84|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet88|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet88|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet88|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet92|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet92|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet92|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet96|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet96|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet96|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + 
"Ethernet112|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet112|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet112|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet112|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet116|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet116|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet116|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet120|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet120|5": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet120|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|0": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|1": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|2": { + "scheduler": "[SCHEDULER|scheduler.0]" + }, + "Ethernet124|3": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet124|4": { + "wred_profile": "[WRED_PROFILE|AZURE_LOSSLESS]", + "scheduler": "[SCHEDULER|scheduler.1]" + }, + "Ethernet124|5": { + "scheduler": 
"[SCHEDULER|scheduler.0]" + }, + "Ethernet124|6": { + "scheduler": "[SCHEDULER|scheduler.0]" + } + }, + "PORTCHANNEL_MEMBER": { + "PortChannel0001|Ethernet112": {}, + "PortChannel0002|Ethernet116": {}, + "PortChannel0003|Ethernet120": {}, + "PortChannel0004|Ethernet124": {} + }, + "PORT": { + "Ethernet0": { + "index": "0", + "lanes": "29,30,31,32", + "description": "fortyGigE0/0", + "mtu": "9100", + "alias": "fortyGigE0/0", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet4": { + "index": "1", + "lanes": "25,26,27,28", + "description": "Servers0:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/4", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet8": { + "index": "2", + "lanes": "37,38,39,40", + "description": "Servers1:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/8", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet12": { + "index": "3", + "lanes": "33,34,35,36", + "description": "Servers2:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/12", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet16": { + "index": "4", + "lanes": "41,42,43,44", + "description": "Servers3:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/16", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet20": { + "index": "5", + "lanes": "45,46,47,48", + "description": "Servers4:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/20", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet24": { + "index": "6", + "lanes": "5,6,7,8", + "description": "Servers5:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/24", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet28": { + "index": "7", + "lanes": "1,2,3,4", + "description": "Servers6:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/28", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet32": { + "index": "8", + "lanes": "9,10,11,12", + "description": 
"Servers7:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/32", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet36": { + "index": "9", + "lanes": "13,14,15,16", + "description": "Servers8:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/36", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet40": { + "index": "10", + "lanes": "21,22,23,24", + "description": "Servers9:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/40", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet44": { + "index": "11", + "lanes": "17,18,19,20", + "description": "Servers10:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/44", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet48": { + "index": "12", + "lanes": "49,50,51,52", + "description": "Servers11:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/48", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet52": { + "index": "13", + "lanes": "53,54,55,56", + "description": "Servers12:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/52", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet56": { + "index": "14", + "lanes": "61,62,63,64", + "description": "Servers13:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/56", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet60": { + "index": "15", + "lanes": "57,58,59,60", + "description": "Servers14:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/60", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet64": { + "index": "16", + "lanes": "65,66,67,68", + "description": "Servers15:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/64", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet68": { + "index": "17", + "lanes": "69,70,71,72", + "description": "Servers16:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/68", + "admin_status": "up", + 
"speed": "40000" + }, + "Ethernet72": { + "index": "18", + "lanes": "77,78,79,80", + "description": "Servers17:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/72", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet76": { + "index": "19", + "lanes": "73,74,75,76", + "description": "Servers18:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/76", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet80": { + "index": "20", + "lanes": "105,106,107,108", + "description": "Servers19:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/80", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet84": { + "index": "21", + "lanes": "109,110,111,112", + "description": "Servers20:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/84", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet88": { + "index": "22", + "lanes": "117,118,119,120", + "description": "Servers21:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/88", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet92": { + "index": "23", + "lanes": "113,114,115,116", + "description": "Servers22:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/92", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet96": { + "index": "24", + "lanes": "121,122,123,124", + "description": "Servers23:eth0", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/96", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet100": { + "index": "25", + "lanes": "125,126,127,128", + "description": "fortyGigE0/100", + "mtu": "9100", + "alias": "fortyGigE0/100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet104": { + "index": "26", + "lanes": "85,86,87,88", + "description": "fortyGigE0/104", + "mtu": "9100", + "alias": "fortyGigE0/104", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet108": { + "index": "27", + "lanes": "81,82,83,84", + "description": "fortyGigE0/108", + "mtu": 
"9100", + "alias": "fortyGigE0/108", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet112": { + "index": "28", + "lanes": "89,90,91,92", + "description": "ARISTA01T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/112", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet116": { + "index": "29", + "lanes": "93,94,95,96", + "description": "ARISTA02T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/116", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet120": { + "index": "30", + "lanes": "97,98,99,100", + "description": "ARISTA03T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/120", + "admin_status": "up", + "speed": "40000" + }, + "Ethernet124": { + "index": "31", + "lanes": "101,102,103,104", + "description": "ARISTA04T1:Ethernet1", + "pfc_asym": "off", + "mtu": "9100", + "alias": "fortyGigE0/124", + "admin_status": "up", + "speed": "40000" + } + }, + "SYSLOG_SERVER": { + "10.3.145.8": {}, + "100.127.20.21": {} + }, + "CRM": { + "Config": { + "acl_table_threshold_type": "percentage", + "nexthop_group_threshold_type": "percentage", + "fdb_entry_high_threshold": "85", + "acl_entry_threshold_type": "percentage", + "ipv6_neighbor_low_threshold": "70", + "nexthop_group_member_low_threshold": "70", + "acl_group_high_threshold": "85", + "ipv4_route_high_threshold": "85", + "acl_counter_high_threshold": "85", + "ipv4_route_low_threshold": "70", + "ipv4_route_threshold_type": "percentage", + "ipv4_neighbor_low_threshold": "70", + "acl_group_threshold_type": "percentage", + "ipv4_nexthop_high_threshold": "85", + "ipv6_route_threshold_type": "percentage", + "nexthop_group_low_threshold": "70", + "ipv4_neighbor_high_threshold": "85", + "ipv6_route_high_threshold": "85", + "ipv6_nexthop_threshold_type": "percentage", + "polling_interval": "300", + "ipv4_nexthop_threshold_type": "percentage", + "acl_group_low_threshold": "70", + "acl_entry_low_threshold": "70", + 
"nexthop_group_member_threshold_type": "percentage", + "ipv4_nexthop_low_threshold": "70", + "acl_counter_threshold_type": "percentage", + "ipv6_neighbor_high_threshold": "85", + "nexthop_group_member_high_threshold": "85", + "acl_table_low_threshold": "70", + "fdb_entry_threshold_type": "percentage", + "ipv6_neighbor_threshold_type": "percentage", + "acl_table_high_threshold": "85", + "ipv6_nexthop_low_threshold": "70", + "acl_counter_low_threshold": "70", + "ipv4_neighbor_threshold_type": "percentage", + "nexthop_group_high_threshold": "85", + "ipv6_route_low_threshold": "70", + "acl_entry_high_threshold": "85", + "fdb_entry_low_threshold": "70", + "ipv6_nexthop_high_threshold": "85" + } + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + "BUFFER_PG": { + "Ethernet4|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet8|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet12|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet16|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet20|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet24|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet28|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet32|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet36|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet40|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet44|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet48|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet52|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet56|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet60|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet64|0": { + "profile": 
"[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet68|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet72|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet76|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet80|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet84|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet88|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet92|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet96|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet112|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet116|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet120|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + }, + "Ethernet124|0": { + "profile": "[BUFFER_PROFILE|ingress_lossy_profile]" + } + }, + "BGP_NEIGHBOR": { + "10.0.0.57": { + "rrclient": "0", + "name": "ARISTA01T1", + "local_addr": "10.0.0.56", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.59": { + "rrclient": "0", + "name": "ARISTA02T1", + "local_addr": "10.0.0.58", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.61": { + "rrclient": "0", + "name": "ARISTA03T1", + "local_addr": "10.0.0.60", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "10.0.0.63": { + "rrclient": "0", + "name": "ARISTA04T1", + "local_addr": "10.0.0.62", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::7a": { + "rrclient": "0", + "name": "ARISTA03T1", + "local_addr": "fc00::79", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::7e": { + "rrclient": "0", + "name": "ARISTA04T1", + "local_addr": "fc00::7d", + "nhopself": "0", + "holdtime": "10", + 
"asn": "64600", + "keepalive": "3" + }, + "fc00::72": { + "rrclient": "0", + "name": "ARISTA01T1", + "local_addr": "fc00::71", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + }, + "fc00::76": { + "rrclient": "0", + "name": "ARISTA02T1", + "local_addr": "fc00::75", + "nhopself": "0", + "holdtime": "10", + "asn": "64600", + "keepalive": "3" + } + }, + "PORTCHANNEL_INTERFACE": { + "PortChannel0001|10.0.0.56/31": {}, + "PortChannel0001|FC00::71/126": {}, + "PortChannel0002|10.0.0.58/31": {}, + "PortChannel0002|FC00::75/126": {}, + "PortChannel0003|10.0.0.60/31": {}, + "PortChannel0003|FC00::79/126": {}, + "PortChannel0004|10.0.0.62/31": {}, + "PortChannel0004|FC00::7D/126": {} + }, + "PFC_WD": { + "Ethernet4": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet8": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet12": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet16": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet20": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet24": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet28": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet32": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet36": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet40": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet44": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet48": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet52": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + 
}, + "Ethernet56": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet60": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet64": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet68": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet72": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet76": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet80": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet84": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet88": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet92": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet96": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet112": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet116": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet120": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "Ethernet124": { + "action": "drop", + "detection_time": "200", + "restoration_time": "200" + }, + "GLOBAL": { + "POLL_INTERVAL": "200" + } + }, + "PORTCHANNEL": { + "PortChannel0001": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet112" + ], + "mtu": "9100" + }, + "PortChannel0002": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet116" + ], + "mtu": "9100" + }, + "PortChannel0003": { + "admin_status": "up", + "min_links": "1", + "members": [ + "Ethernet120" + ], + "mtu": "9100" + }, + "PortChannel0004": { + "admin_status": "up", + "min_links": "1", + "members": [ + 
"Ethernet124" + ], + "mtu": "9100" + } + }, + "LOOPBACK_INTERFACE": { + "Loopback0|10.1.0.32/32": {}, + "Loopback0|FC00:1::32/128": {} + }, + "PORT_QOS_MAP": { + "Ethernet4": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet8": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet12": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet16": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet20": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet24": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet28": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet32": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + 
"tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet36": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet40": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet44": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet48": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet52": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet56": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet60": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet64": { + "tc_to_pg_map": 
"[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet68": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet72": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet76": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet80": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet84": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet88": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet92": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet96": { + 
"tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet112": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet116": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet120": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + }, + "Ethernet124": { + "tc_to_pg_map": "[TC_TO_PRIORITY_GROUP_MAP|AZURE]", + "tc_to_queue_map": "[TC_TO_QUEUE_MAP|AZURE]", + "pfc_enable": "3,4", + "pfc_to_queue_map": "[MAP_PFC_PRIORITY_TO_QUEUE|AZURE]", + "dscp_to_tc_map": "[DSCP_TO_TC_MAP|AZURE]" + } + }, + "DHCP_SERVER": { + "192.0.0.1": {}, + "192.0.0.2": {}, + "192.0.0.3": {}, + "192.0.0.4": {}, + "192.0.0.5": {}, + "192.0.0.6": {}, + "192.0.0.7": {}, + "192.0.0.8": {}, + "192.0.0.9": {}, + "192.0.0.10": {}, + "192.0.0.11": {}, + "192.0.0.12": {}, + "192.0.0.13": {}, + "192.0.0.14": {}, + "192.0.0.15": {}, + "192.0.0.16": {}, + "192.0.0.17": {}, + "192.0.0.18": {}, + "192.0.0.19": {}, + "192.0.0.20": {}, + "192.0.0.21": {}, + "192.0.0.22": {}, + "192.0.0.23": {}, + "192.0.0.24": {}, + "192.0.0.25": {}, + "192.0.0.26": {}, + "192.0.0.27": {}, + "192.0.0.28": {}, + "192.0.0.29": {}, + "192.0.0.30": {}, + "192.0.0.31": {}, + "192.0.0.32": {}, + "192.0.0.33": {}, + "192.0.0.34": {}, + "192.0.0.35": {}, + "192.0.0.36": {}, + "192.0.0.37": {}, + 
"192.0.0.38": {}, + "192.0.0.39": {}, + "192.0.0.40": {}, + "192.0.0.41": {}, + "192.0.0.42": {}, + "192.0.0.43": {}, + "192.0.0.44": {}, + "192.0.0.45": {}, + "192.0.0.46": {}, + "192.0.0.47": {}, + "192.0.0.48": {} + }, + "VLAN_MEMBER": { + "Vlan1000|Ethernet4": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet12": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet16": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet20": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet24": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet28": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet32": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet36": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet40": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet44": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet48": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet52": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet56": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet60": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet64": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet68": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet72": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet76": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet80": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet84": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet88": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet92": { + "tagging_mode": "untagged" + }, + "Vlan1000|Ethernet96": { + "tagging_mode": "untagged" + } + }, + "BUFFER_QUEUE": { + "Ethernet4|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet4|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet4|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet8|0-2": { + 
"profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet8|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet8|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet12|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet12|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet12|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet16|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet16|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet16|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet20|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet20|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet20|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet24|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet24|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet24|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet28|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet28|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet28|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet32|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet32|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet32|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet36|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet36|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet36|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet40|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + 
"Ethernet40|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet40|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet44|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet44|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet44|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet48|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet48|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet48|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet52|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet52|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet52|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet56|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet56|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet56|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet60|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet60|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet60|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet64|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet64|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet64|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet68|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet68|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet68|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet72|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet72|3-4": { + "profile": 
"[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet72|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet76|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet76|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet76|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet80|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet80|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet80|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet84|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet84|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet84|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet88|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet88|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet88|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet92|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet92|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet92|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet96|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet96|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet96|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet112|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet112|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet112|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet116|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet116|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + 
"Ethernet116|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet120|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet120|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet120|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet124|0-2": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + }, + "Ethernet124|3-4": { + "profile": "[BUFFER_PROFILE|egress_lossless_profile]" + }, + "Ethernet124|5-6": { + "profile": "[BUFFER_PROFILE|egress_lossy_profile]" + } + }, + "WRED_PROFILE": { + "AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "wred_green_enable": "true", + "ecn": "ecn_all", + "green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "wred_yellow_enable": "true", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "wred_red_enable": "true", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + } + }, + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "1": "0", + "0": "0", + "3": "3", + "2": "0", + "5": "0", + "4": "4", + "7": "7", + "6": "0" + } + }, + "DEVICE_NEIGHBOR_METADATA": { + "ARISTA01T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.112", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA02T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.113", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA03T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.114", + "hwsku": "Arista-VM", + "type": "LeafRouter" + }, + "ARISTA04T1": { + "lo_addr": "None", + "mgmt_addr": "172.16.131.115", + "hwsku": "Arista-VM", + "type": "LeafRouter" + } + }, + "DEVICE_NEIGHBOR": { + "Ethernet4": { + "name": "Servers0", + "port": "eth0" + }, + "Ethernet8": { + "name": "Servers1", + "port": "eth0" + }, + "Ethernet12": { + "name": "Servers2", + "port": "eth0" + }, + "Ethernet16": { + "name": "Servers3", + "port": "eth0" + }, 
+ "Ethernet20": { + "name": "Servers4", + "port": "eth0" + }, + "Ethernet24": { + "name": "Servers5", + "port": "eth0" + }, + "Ethernet28": { + "name": "Servers6", + "port": "eth0" + }, + "Ethernet32": { + "name": "Servers7", + "port": "eth0" + }, + "Ethernet36": { + "name": "Servers8", + "port": "eth0" + }, + "Ethernet40": { + "name": "Servers9", + "port": "eth0" + }, + "Ethernet44": { + "name": "Servers10", + "port": "eth0" + }, + "Ethernet48": { + "name": "Servers11", + "port": "eth0" + }, + "Ethernet52": { + "name": "Servers12", + "port": "eth0" + }, + "Ethernet56": { + "name": "Servers13", + "port": "eth0" + }, + "Ethernet60": { + "name": "Servers14", + "port": "eth0" + }, + "Ethernet64": { + "name": "Servers15", + "port": "eth0" + }, + "Ethernet68": { + "name": "Servers16", + "port": "eth0" + }, + "Ethernet72": { + "name": "Servers17", + "port": "eth0" + }, + "Ethernet76": { + "name": "Servers18", + "port": "eth0" + }, + "Ethernet80": { + "name": "Servers19", + "port": "eth0" + }, + "Ethernet84": { + "name": "Servers20", + "port": "eth0" + }, + "Ethernet88": { + "name": "Servers21", + "port": "eth0" + }, + "Ethernet92": { + "name": "Servers22", + "port": "eth0" + }, + "Ethernet96": { + "name": "Servers23", + "port": "eth0" + }, + "Ethernet112": { + "name": "ARISTA01T1", + "port": "Ethernet1" + }, + "Ethernet116": { + "name": "ARISTA02T1", + "port": "Ethernet1" + }, + "Ethernet120": { + "name": "ARISTA03T1", + "port": "Ethernet1" + }, + "Ethernet124": { + "name": "ARISTA04T1", + "port": "Ethernet1" + } + }, + "DSCP_TO_TC_MAP": { + "AZURE": { + "56": "1", + "54": "1", + "28": "1", + "48": "6", + "29": "1", + "60": "1", + "61": "1", + "62": "1", + "63": "1", + "49": "1", + "34": "1", + "24": "1", + "25": "1", + "26": "1", + "27": "1", + "20": "1", + "21": "1", + "22": "1", + "23": "1", + "46": "5", + "47": "1", + "44": "1", + "45": "1", + "42": "1", + "43": "1", + "40": "1", + "41": "1", + "1": "1", + "0": "1", + "3": "3", + "2": "1", + "5": "2", + "4": "4", + 
"7": "1", + "6": "1", + "9": "1", + "8": "0", + "35": "1", + "13": "1", + "12": "1", + "15": "1", + "58": "1", + "11": "1", + "10": "1", + "39": "1", + "38": "1", + "59": "1", + "14": "1", + "17": "1", + "16": "1", + "19": "1", + "18": "1", + "31": "1", + "30": "1", + "51": "1", + "36": "1", + "53": "1", + "52": "1", + "33": "1", + "55": "1", + "37": "1", + "32": "1", + "57": "1", + "50": "1" + } + }, + "MGMT_INTERFACE": { + "eth0|10.3.147.17/23": { + "gwaddr": "10.3.146.1" + }, + "eth0|FC00:2::32/64": { + "forced_mgmt_routes": [ + "10.3.145.98/31", + "10.3.145.8", + "100.127.20.16/28", + "10.3.149.170/31", + "40.122.216.24", + "13.91.48.226", + "10.3.145.14", + "10.64.246.0/24", + "10.64.247.0/24" + ], + "gwaddr": "fc00:2::1" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "1": "1", + "0": "0", + "3": "3", + "2": "2", + "5": "5", + "4": "4", + "7": "7", + "6": "6" + } + }, + "MGMT_PORT": { + "eth0": { + "alias": "eth0", + "admin_status": "up" + } + }, + "VERSIONS": { + "DATABASE": { + "VERSION": "version_1_0_1" + } + }, + "ACL_TABLE": { + "DATAACL": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004" + ], + "type": "L3", + "policy_desc": "DATAACL", + "stage": "ingress" + }, + "EVERFLOW": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet24", + "Ethernet40", + "Ethernet20", + "Ethernet44", + "Ethernet48", + "Ethernet28", + "Ethernet96", + "Ethernet92", + "Ethernet76", + "Ethernet72", + "Ethernet52", + "Ethernet80", + "Ethernet56", + "Ethernet32", + "Ethernet16", + "Ethernet36", + "Ethernet12", + "Ethernet60", + "Ethernet8", + "Ethernet4", + "Ethernet64", + "Ethernet68", + "Ethernet84", + "Ethernet88" + ], + "type": "MIRROR", + "policy_desc": "EVERFLOW", + "stage": "ingress" + }, + "EVERFLOWV6": { + "ports": [ + "PortChannel0001", + "PortChannel0002", + "PortChannel0003", + "PortChannel0004", + "Ethernet24", + "Ethernet40", + "Ethernet20", + "Ethernet44", + 
"Ethernet48", + "Ethernet28", + "Ethernet96", + "Ethernet92", + "Ethernet76", + "Ethernet72", + "Ethernet52", + "Ethernet80", + "Ethernet56", + "Ethernet32", + "Ethernet16", + "Ethernet36", + "Ethernet12", + "Ethernet60", + "Ethernet8", + "Ethernet4", + "Ethernet64", + "Ethernet68", + "Ethernet84", + "Ethernet88" + ], + "type": "MIRRORV6", + "policy_desc": "EVERFLOWV6", + "stage": "ingress" + }, + "SNMP_ACL": { + "services": [ + "SNMP" + ], + "type": "CTRLPLANE", + "policy_desc": "SNMP_ACL", + "stage": "ingress" + }, + "SSH_ONLY": { + "services": [ + "SSH" + ], + "type": "CTRLPLANE", + "policy_desc": "SSH_ONLY", + "stage": "ingress" + } + }, + "CABLE_LENGTH": { + "AZURE": { + "Ethernet8": "5m", + "Ethernet0": "300m", + "Ethernet4": "5m", + "Ethernet108": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet68": "5m", + "Ethernet96": "5m", + "Ethernet124": "40m", + "Ethernet92": "5m", + "Ethernet120": "40m", + "Ethernet52": "5m", + "Ethernet56": "5m", + "Ethernet76": "5m", + "Ethernet72": "5m", + "Ethernet64": "5m", + "Ethernet32": "5m", + "Ethernet16": "5m", + "Ethernet36": "5m", + "Ethernet12": "5m", + "Ethernet88": "5m", + "Ethernet116": "40m", + "Ethernet80": "5m", + "Ethernet112": "40m", + "Ethernet84": "5m", + "Ethernet48": "5m", + "Ethernet44": "5m", + "Ethernet40": "5m", + "Ethernet28": "5m", + "Ethernet60": "5m", + "Ethernet20": "5m", + "Ethernet24": "5m" + } + }, + "SCHEDULER": { + "scheduler.0": { + "type": "DWRR", + "weight": "14" + }, + "scheduler.1": { + "type": "DWRR", + "weight": "15" + } + }, + "BUFFER_POOL": { + "egress_lossless_pool": { + "type": "egress", + "mode": "static", + "size": "12766208" + }, + "egress_lossy_pool": { + "type": "egress", + "mode": "dynamic", + "size": "7326924" + }, + "ingress_lossless_pool": { + "type": "ingress", + "mode": "dynamic", + "size": "12766208" + } + }, + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "static_th": "12766208", + "pool": "[BUFFER_POOL|egress_lossless_pool]", + "size": "0" + 
}, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "[BUFFER_POOL|egress_lossy_pool]", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "[BUFFER_POOL|ingress_lossless_pool]", + "size": "0" + } + } +} diff --git a/sonic-utilities-tests/filter_fdb_input/test_vectors.py b/sonic-utilities-tests/filter_fdb_input/test_vectors.py index 55d6c136de..cd1592a0a4 100644 --- a/sonic-utilities-tests/filter_fdb_input/test_vectors.py +++ b/sonic-utilities-tests/filter_fdb_input/test_vectors.py @@ -7,6 +7,8 @@ ], "fdb": [ ], + "config_db": { + }, "expected_fdb": [ ], }, @@ -19,6 +21,13 @@ }, "OP": "SET" }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, ], "fdb": [ { @@ -29,6 +38,14 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + }, "expected_fdb": [ ], }, @@ -41,6 +58,13 @@ }, "OP": "SET" }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, ], "fdb": [ { @@ -51,7 +75,116 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + 
"NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|25.103.178.1/21": {} + }, + }, "expected_fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ { "FDB_TABLE:Vlan1000:72-06-00-01-01-16": { "type": "dynamic", @@ -60,10 +193,80 @@ "OP": "SET" }, ], + "config_db": { + "VLAN": { + "Vlan1000": {} + }, + "VLAN_INTERFACE": { + "Vlan1000|192.168.128.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp":[ + { + "NEIGH_TABLE:Vlan1000:192.168.0.10": { + "neigh": "72:06:00:01:01:16", + "family": "IPv4" + }, + "OP": "SET" + }, + { + "NEIGH_TABLE:Vlan1:25.103.178.129": { + "neigh": "50:2f:a8:cb:76:7c", + "family": "IPv4" + }, + "OP": "SET" + }, + ], + "fdb": [ + { + "FDB_TABLE:Vlan1:50-2f-a8-cb-76-7c": { + "type": "dynamic", + "port": "Ethernet22" + }, + "OP": "SET" + }, + ], + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": { + "Vlan1|25.103.0.1/21": {} + }, + }, + "expected_fdb": [ + ], }, { "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": "sonic-utilities-tests/filter_fdb_input/config_db.json", "expected_fdb": "sonic-utilities-tests/filter_fdb_input/expected_fdb.json" }, + { + "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", + "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": { + "VLAN": { + "Vlan1": {} + }, + "VLAN_INTERFACE": 
{ + "Vlan1|192.168.0.1/21": {} + }, + }, + "expected_fdb": [ + ], + }, + { + "arp": "sonic-utilities-tests/filter_fdb_input/arp.json", + "fdb": "sonic-utilities-tests/filter_fdb_input/fdb.json", + "config_db": { + }, + "expected_fdb": [ + ], + }, ] From 634e4834195d838369e336d95ce7f32699287331 Mon Sep 17 00:00:00 2001 From: Longxiang Lyu <35479537+lolyu@users.noreply.github.com> Date: Thu, 25 Jun 2020 17:19:57 +0800 Subject: [PATCH 095/111] [show] Add missing verbose option to "show line" (#961) * Fixing "show line" by adding the missing decorator that supports "--verbose" option. Signed-off-by: lolyu --- show/main.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/show/main.py b/show/main.py index 6b0f6f8484..adaacf36a1 100755 --- a/show/main.py +++ b/show/main.py @@ -2650,7 +2650,8 @@ def reboot_cause(): # 'line' command ("show line") # @cli.command('line') -def line(): +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def line(verbose): """Show all /dev/ttyUSB lines and their info""" cmd = "consutil show" run_command(cmd, display_cmd=verbose) From c28b057da17284045dcc0ac9c7a33a1d145fa229 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Thu, 25 Jun 2020 14:50:27 -0700 Subject: [PATCH 096/111] [sfpshow][mock_state_db] Tweak key names of some transceiver info fields (#958) Change the following key names: - hardwarerev -> hardware_rev - serialnum -> serial - manufacturename -> manufacturer - modelname -> model - Connector -> connector Depends on: - https://github.com/Azure/sonic-platform-common/pull/97 - https://github.com/Azure/sonic-platform-daemons/pull/62 --- scripts/sfpshow | 19 ++++++++++++------- .../mock_tables/state_db.json | 10 +++++----- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/scripts/sfpshow b/scripts/sfpshow index 2d12774d44..bf0b90408a 100755 --- a/scripts/sfpshow +++ b/scripts/sfpshow @@ -24,16 +24,21 @@ try: except KeyError: pass -qsfp_data_map = 
{'modelname': 'Vendor PN', 'vendor_oui': 'Vendor OUI', +qsfp_data_map = {'model': 'Vendor PN', + 'vendor_oui': 'Vendor OUI', 'vendor_date': 'Vendor Date Code(YYYY-MM-DD Lot)', - 'manufacturename': 'Vendor Name', - 'hardwarerev': 'Vendor Rev', 'serialnum': 'Vendor SN', - 'type': 'Identifier', 'ext_identifier': 'Extended Identifier', + 'manufacturer': 'Vendor Name', + 'hardware_rev': 'Vendor Rev', + 'serial': 'Vendor SN', + 'type': 'Identifier', + 'ext_identifier': 'Extended Identifier', 'ext_rateselect_compliance': 'Extended RateSelect Compliance', - 'cable_length': 'cable_length', 'cable_type': 'Length', + 'cable_length': 'cable_length', + 'cable_type': 'Length', 'nominal_bit_rate': 'Nominal Bit Rate(100Mbs)', - 'specification_compliance':'Specification compliance', - 'encoding': 'Encoding', 'Connector': 'Connector' + 'specification_compliance': 'Specification compliance', + 'encoding': 'Encoding', + 'connector': 'Connector' } sfp_dom_channel_monitor_map = {'rx1power': 'RXPower', diff --git a/sonic-utilities-tests/mock_tables/state_db.json b/sonic-utilities-tests/mock_tables/state_db.json index 924600ae4b..b44b60df9f 100644 --- a/sonic-utilities-tests/mock_tables/state_db.json +++ b/sonic-utilities-tests/mock_tables/state_db.json @@ -1,13 +1,13 @@ { "TRANSCEIVER_INFO|Ethernet0": { "type": "QSFP28 or later", - "hardwarerev": "AC", - "serialnum": "MT1706FT02064", - "manufacturename": "Mellanox", - "modelname": "MFA1A00-C003", + "hardware_rev": "AC", + "serial": "MT1706FT02064", + "manufacturer": "Mellanox", + "model": "MFA1A00-C003", "vendor_oui": "00-02-c9", "vendor_date": "2017-01-13 ", - "Connector": "No separable connector", + "connector": "No separable connector", "encoding": "64B66B", "ext_identifier": "Power Class 3(2.5W max), CDR present in Rx Tx", "ext_rateselect_compliance": "QSFP+ Rate Select Version 1", From 85879ddddfde237d12054a23893778f82be5df8c Mon Sep 17 00:00:00 2001 From: Sangita Maity Date: Thu, 25 Jun 2020 16:32:42 -0700 Subject: [PATCH 097/111] 
[Show | Command Reference] Add Port breakout Show Command (#859) **- What I did** Implemented show interface breakout sub-command to show the port capability and the current breakout mode. Available commands are mentioned below. ``` show interfaces breakout --help show interfaces breakout current-mode ``` **- How I did it** using platform.json, hwsku.json and config-db BREAKOUT_CFG table. **- How to verify it** ``` admin@lnos-x1-a-fab01:~$ show interfaces breakout --help Usage: show interfaces breakout [OPTIONS] COMMAND [ARGS]... Show interface breakout Options: -?, -h, --help Show this message and exit. Commands: current-mode Show interface breakout current-mode ``` ``` admin@lnos-x1-a-fab01:~$ show interfaces breakout { "Ethernet0": { "index": "1,1,1,1", "default_brkout_mode": "1x100G[40G]", "child ports": "Ethernet0", "child port speed": "100G", "breakout_modes": "1x100G[40G],2x50G,4x25G[10G]", "Current Breakout Mode": "1x100G[40G]", "lanes": "65,66,67,68", "alias_at_lanes": "Eth1/1, Eth1/2, Eth1/3, Eth1/4" }, "Ethernet4": { "index": "2,2,2,2", "default_brkout_mode": "1x100G[40G]", "child ports": "Ethernet4,Ethernet5,Ethernet6,Ethernet7", "child port speed": "25G,10G,25G,25G", "breakout_modes": "1x100G[40G],2x50G,4x25G[10G]", "Current Breakout Mode": "4x25G", "lanes": "69,70,71,72", "alias_at_lanes": "Eth2/1, Eth2/2, Eth2/3, Eth2/4" }, "Ethernet8": { "index": "3,3,3,3", "default_brkout_mode": "1x100G[40G]", "child ports": "Ethernet8", "child port speed": "100G", "breakout_modes": "1x100G[40G],2x50G,4x25G[10G]", "Current Breakout Mode": "1x100G[40G]", "lanes": "73,74,75,76", "alias_at_lanes": "Eth3/1, Eth3/2, Eth3/3, Eth3/4" },... 
continue } ``` ``` admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode Ethernet0 +-------------+-------------------------+ | Interface | Current Breakout Mode | +=============+=========================+ | Ethernet0 | 4x25G[10G] | +-------------+-------------------------+ ``` ``` admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode +-------------+-------------------------+ | Interface | Current Breakout Mode | +=============+=========================+ | Ethernet0 | 4x25G[10G] | +-------------+-------------------------+ | Ethernet4 | 4x25G[10G] | +-------------+-------------------------+ | Ethernet8 | 4x25G[10G] | +-------------+-------------------------+ ``` Signed-off-by: Sangita Maity --- doc/Command-Reference.md | 51 ++++++++ show/main.py | 111 ++++++++++++++++++ .../mock_tables/config_db.json | 9 ++ sonic-utilities-tests/show_breakout_test.py | 65 ++++++++++ 4 files changed, 236 insertions(+) create mode 100644 sonic-utilities-tests/show_breakout_test.py diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 9ee91a88cd..55dfaff599 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -2345,6 +2345,7 @@ Subsequent pages explain each of these commands in detail. -?, -h, --help Show this message and exit. Commands: + breakout Show Breakout Mode information by interfaces counters Show interface counters description Show interface status, protocol and... naming_mode Show interface naming_mode status @@ -2354,6 +2355,56 @@ Subsequent pages explain each of these commands in detail. transceiver Show SFP Transceiver information ``` +**show interfaces breakout** + +This show command displays the port capability for all interfaces i.e. index, lanes, default_brkout_mode, breakout_modes(i.e. all the available breakout modes) and brkout_mode (i.e. current breakout mode). To display current breakout mode, "current-mode" subcommand can be used.For a single interface, provide the interface name with the sub-command. 
+ +- Usage: + ``` + show interfaces breakout + show interfaces breakout current-mode + show interfaces breakout current-mode + ``` + +- Example: + ``` + admin@lnos-x1-a-fab01:~$ show interfaces breakout + { + "Ethernet0": { + "index": "1,1,1,1", + "default_brkout_mode": "1x100G[40G]", + "child ports": "Ethernet0", + "child port speed": "100G", + "breakout_modes": "1x100G[40G],2x50G,4x25G[10G]", + "Current Breakout Mode": "1x100G[40G]", + "lanes": "65,66,67,68", + "alias_at_lanes": "Eth1/1, Eth1/2, Eth1/3, Eth1/4" + },... continue + } + +The "current-mode" subcommand is used to display current breakout mode for all interfaces. + + admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode + +-------------+-------------------------+ + | Interface | Current Breakout Mode | + +=============+=========================+ + | Ethernet0 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet4 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet8 | 4x25G[10G] | + +-------------+-------------------------+ + | Ethernet12 | 4x25G[10G] | + +-------------+-------------------------+ + + admin@lnos-x1-a-fab01:~$ show interfaces breakout current-mode Ethernet0 + +-------------+-------------------------+ + | Interface | Current Breakout Mode | + +=============+=========================+ + | Ethernet0 | 4x25G[10G] | + +-------------+-------------------------+ + ``` + **show interfaces counters** This show command displays packet counters for all interfaces since the last time the counters were cleared. To display l3 counters "rif" subcommand can be used. There is no facility to display counters for one specific l2 interface. For l3 interfaces a single interface output mode is present. Optional argument "-a" provides two additional columns - RX-PPS and TX_PPS. 
diff --git a/show/main.py b/show/main.py index adaacf36a1..95993aca8c 100755 --- a/show/main.py +++ b/show/main.py @@ -9,6 +9,7 @@ import sys import ipaddress from pkg_resources import parse_version +from collections import OrderedDict import click from natsort import natsorted @@ -17,10 +18,16 @@ import sonic_device_util from swsssdk import ConfigDBConnector from swsssdk import SonicV2Connector +from portconfig import get_child_ports import mlnx +# Global Variable +PLATFORM_ROOT_PATH = "/usr/share/sonic/device" +PLATFORM_JSON = 'platform.json' +HWSKU_JSON = 'hwsku.json' SONIC_CFGGEN_PATH = '/usr/local/bin/sonic-cfggen' +PORT_STR = "Ethernet" VLAN_SUB_INTERFACE_SEPARATOR = '.' @@ -181,6 +188,15 @@ def get_routing_stack(): # Global Routing-Stack variable routing_stack = get_routing_stack() +# Read given JSON file +def readJsonFile(fileName): + try: + with open(fileName) as f: + result = json.load(f) + except Exception as e: + click.echo(str(e)) + raise click.Abort() + return result def run_command(command, display_cmd=False, return_cmd=False): if display_cmd: @@ -789,6 +805,101 @@ def alias(interfacename): click.echo(tabulate(body, header)) + +# +# 'breakout' group ### +# +@interfaces.group(invoke_without_command=True) +@click.pass_context +def breakout(ctx): + """Show Breakout Mode information by interfaces""" + # Reading data from Redis configDb + config_db = ConfigDBConnector() + config_db.connect() + ctx.obj = {'db': config_db} + + try: + curBrkout_tbl = config_db.get_table('BREAKOUT_CFG') + except Exception as e: + click.echo("Breakout table is not present in Config DB") + raise click.Abort() + + if ctx.invoked_subcommand is None: + + # Get HWSKU and Platform information + hw_info_dict = get_hw_info_dict() + platform = hw_info_dict['platform'] + hwsku = hw_info_dict['hwsku'] + + # Get port capability from platform and hwsku related files + platformFile = "{}/{}/{}".format(PLATFORM_ROOT_PATH, platform, PLATFORM_JSON) + platformDict = 
readJsonFile(platformFile)['interfaces'] + hwskuDict = readJsonFile("{}/{}/{}/{}".format(PLATFORM_ROOT_PATH, platform, hwsku, HWSKU_JSON))['interfaces'] + + if not platformDict or not hwskuDict: + click.echo("Can not load port config from {} or {} file".format(PLATFORM_JSON, HWSKU_JSON)) + raise click.Abort() + + for port_name in platformDict.keys(): + curBrkout_mode = curBrkout_tbl[port_name]["brkout_mode"] + + # Update deafult breakout mode and current breakout mode to platformDict + platformDict[port_name].update(hwskuDict[port_name]) + platformDict[port_name]["Current Breakout Mode"] = curBrkout_mode + + # List all the child ports if present + child_portDict = get_child_ports(port_name, curBrkout_mode, platformFile) + if not child_portDict: + click.echo("Cannot find ports from {} file ".format(PLATFORM_JSON)) + raise click.Abort() + + child_ports = natsorted(child_portDict.keys()) + + children, speeds = [], [] + # Update portname and speed of child ports if present + for port in child_ports: + speed = config_db.get_entry('PORT', port).get('speed') + if speed is not None: + speeds.append(str(int(speed)//1000)+'G') + children.append(port) + + platformDict[port_name]["child ports"] = ",".join(children) + platformDict[port_name]["child port speeds"] = ",".join(speeds) + + # Sorted keys by name in natural sort Order for human readability + parsed = OrderedDict((k, platformDict[k]) for k in natsorted(platformDict.keys())) + click.echo(json.dumps(parsed, indent=4)) + +# 'breakout current-mode' subcommand ("show interfaces breakout current-mode") +@breakout.command('current-mode') +@click.argument('interface', metavar='', required=False, type=str) +@click.pass_context +def currrent_mode(ctx, interface): + """Show current Breakout mode Info by interface(s)""" + + config_db = ctx.obj['db'] + + header = ['Interface', 'Current Breakout Mode'] + body = [] + + try: + curBrkout_tbl = config_db.get_table('BREAKOUT_CFG') + except Exception as e: + click.echo("Breakout table is 
not present in Config DB") + raise click.Abort() + + # Show current Breakout Mode of user prompted interface + if interface is not None: + body.append([interface, str(curBrkout_tbl[interface]['brkout_mode'])]) + click.echo(tabulate(body, header, tablefmt="grid")) + return + + # Show current Breakout Mode for all interfaces + for name in natsorted(curBrkout_tbl.keys()): + body.append([name, str(curBrkout_tbl[name]['brkout_mode'])]) + click.echo(tabulate(body, header, tablefmt="grid")) + + # # 'neighbor' group ### # diff --git a/sonic-utilities-tests/mock_tables/config_db.json b/sonic-utilities-tests/mock_tables/config_db.json index 3061c3015e..ec278c3450 100644 --- a/sonic-utilities-tests/mock_tables/config_db.json +++ b/sonic-utilities-tests/mock_tables/config_db.json @@ -1,4 +1,13 @@ { + "BREAKOUT_CFG|Ethernet0": { + "brkout_mode": "4x25G[10G]" + }, + "BREAKOUT_CFG|Ethernet4": { + "brkout_mode": "2x50G" + }, + "BREAKOUT_CFG|Ethernet8": { + "brkout_mode": "1x100G[40G]" + }, "PORT|Ethernet0": { "alias": "etp1", "lanes": "0,1,2,3", diff --git a/sonic-utilities-tests/show_breakout_test.py b/sonic-utilities-tests/show_breakout_test.py new file mode 100644 index 0000000000..f3636e9907 --- /dev/null +++ b/sonic-utilities-tests/show_breakout_test.py @@ -0,0 +1,65 @@ +import os +import sys +from click.testing import CliRunner +from unittest import TestCase +from swsssdk import ConfigDBConnector + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +import mock_tables.dbconnector +import show.main as show + +# Expected output for 'show breakout current-mode' +current_mode_all_output = ''+ \ +"""+-------------+-------------------------+ +| Interface | Current Breakout Mode | ++=============+=========================+ +| Ethernet0 | 4x25G[10G] | ++-------------+-------------------------+ +| Ethernet4 | 2x50G | ++-------------+-------------------------+ +| 
Ethernet8 | 1x100G[40G] | ++-------------+-------------------------+ +""" + +# Expected output for 'show breakout current-mode Ethernet0' +current_mode_intf_output = ''+ \ +"""+-------------+-------------------------+ +| Interface | Current Breakout Mode | ++=============+=========================+ +| Ethernet0 | 4x25G[10G] | ++-------------+-------------------------+ +""" + +class TestBreakout(TestCase): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def setUp(self): + self.runner = CliRunner() + self.config_db = ConfigDBConnector() + self.config_db.connect() + self.obj = {'db': self.config_db} + + # Test 'show interfaces breakout current-mode' + def test_all_intf_current_mode(self): + result = self.runner.invoke(show.cli.commands["interfaces"].commands["breakout"].commands["current-mode"], [], obj=self.obj) + print(sys.stderr, result.output) + assert result.output == current_mode_all_output + + # Test 'show interfaces breakout current-mode Ethernet0' + def test_single_intf_current_mode(self): + result = self.runner.invoke(show.cli.commands["interfaces"].commands["breakout"].commands["current-mode"], ["Ethernet0"], obj=self.obj) + print(sys.stderr, result.output) + assert result.output == current_mode_intf_output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" From a8c36c82da685c5e37c4e492054c830d2ad1ff72 Mon Sep 17 00:00:00 2001 From: Samuel Angebault Date: Fri, 26 Jun 2020 09:56:11 -0700 Subject: [PATCH 098/111] [sonic_installer] Refactor sonic_installer code (#953) Add a new Bootloader abstraction. This makes it easier to add bootloader specific behavior while keeping the main logic identical. It is also a step that will ease the introduction of secureboot which relies on bootloader specific behaviors. 
Shuffle code around to get rid of the hacky if/else all over the place. There are now 3 bootloader classes - AbootBootloader - GrubBootloader - UbootBootloader There was almost no logic change in any of the implementations. Only the AbootBootloader saw some small improvements. More will follow in subsequent changes. --- setup.py | 1 + sonic_installer/bootloader/__init__.py | 16 ++ sonic_installer/bootloader/aboot.py | 125 ++++++++ sonic_installer/bootloader/bootloader.py | 50 ++++ sonic_installer/bootloader/grub.py | 86 ++++++ sonic_installer/bootloader/onie.py | 48 ++++ sonic_installer/bootloader/uboot.py | 83 ++++++ sonic_installer/common.py | 25 ++ sonic_installer/main.py | 352 ++++------------------- 9 files changed, 487 insertions(+), 299 deletions(-) create mode 100644 sonic_installer/bootloader/__init__.py create mode 100644 sonic_installer/bootloader/aboot.py create mode 100644 sonic_installer/bootloader/bootloader.py create mode 100644 sonic_installer/bootloader/grub.py create mode 100644 sonic_installer/bootloader/onie.py create mode 100644 sonic_installer/bootloader/uboot.py create mode 100644 sonic_installer/common.py diff --git a/setup.py b/setup.py index edffa77cd0..adbcc2c992 100644 --- a/setup.py +++ b/setup.py @@ -49,6 +49,7 @@ 'pddf_ledutil', 'show', 'sonic_installer', + 'sonic_installer.bootloader', 'sonic-utilities-tests', 'undebug', 'utilities_common', diff --git a/sonic_installer/bootloader/__init__.py b/sonic_installer/bootloader/__init__.py new file mode 100644 index 0000000000..d2872eb7d0 --- /dev/null +++ b/sonic_installer/bootloader/__init__.py @@ -0,0 +1,16 @@ + +from .aboot import AbootBootloader +from .grub import GrubBootloader +from .uboot import UbootBootloader + +BOOTLOADERS = [ + AbootBootloader, + GrubBootloader, + UbootBootloader, +] + +def get_bootloader(): + for bootloaderCls in BOOTLOADERS: + if bootloaderCls.detect(): + return bootloaderCls() + raise RuntimeError('Bootloader could not be detected') diff --git 
a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py new file mode 100644 index 0000000000..b7c1a061ae --- /dev/null +++ b/sonic_installer/bootloader/aboot.py @@ -0,0 +1,125 @@ +""" +Bootloader implementation for Aboot used on Arista devices +""" + +import collections +import os +import re +import subprocess + +import click + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .bootloader import Bootloader + +_secureboot = None +def isSecureboot(): + global _secureboot + if _secureboot is None: + with open('/proc/cmdline') as f: + m = re.search(r"secure_boot_enable=[y1]", f.read()) + _secureboot = bool(m) + return _secureboot + +class AbootBootloader(Bootloader): + + NAME = 'aboot' + BOOT_CONFIG_PATH = os.path.join(HOST_PATH, 'boot-config') + DEFAULT_IMAGE_PATH = '/tmp/sonic_image.swi' + + def _boot_config_read(self, path=BOOT_CONFIG_PATH): + config = collections.OrderedDict() + with open(path) as f: + for line in f.readlines(): + line = line.strip() + if not line or line.startswith('#') or '=' not in line: + continue + key, value = line.split('=', 1) + config[key] = value + return config + + def _boot_config_write(self, config, path=BOOT_CONFIG_PATH): + with open(path, 'w') as f: + f.write(''.join('%s=%s\n' % (k, v) for k, v in config.items())) + + def _boot_config_set(self, **kwargs): + path = kwargs.pop('path', self.BOOT_CONFIG_PATH) + config = self._boot_config_read(path=path) + for key, value in kwargs.items(): + config[key] = value + self._boot_config_write(config, path=path) + + def _swi_image_path(self, image): + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + if isSecureboot(): + return 'flash:%s/sonic.swi' % image_dir + return 'flash:%s/.sonic-boot.swi' % image_dir + + def get_current_image(self): + with open('/proc/cmdline') as f: + current = re.search(r"loop=/*(\S+)/", f.read()).group(1) + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def 
get_installed_images(self): + images = [] + for filename in os.listdir(HOST_PATH): + if filename.startswith(IMAGE_DIR_PREFIX): + images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) + return images + + def get_next_image(self): + config = self._boot_config_read() + match = re.search(r"flash:/*(\S+)/", config['SWI']) + return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def set_default_image(self, image): + image_path = self._swi_image_path(image) + self._boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) + return True + + def set_next_image(self, image): + image_path = self._swi_image_path(image) + self._boot_config_set(SWI=image_path) + return True + + def install_image(self, image_path): + run_command("/usr/bin/unzip -od /tmp %s boot0" % image_path) + run_command("swipath=%s target_path=/host sonic_upgrade=1 . /tmp/boot0" % image_path) + + def remove_image(self, image): + nextimage = self.get_next_image() + current = self.get_current_image() + if image == nextimage: + image_path = self._swi_image_path(current) + self._boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) + click.echo("Set next and default boot to current image %s" % current) + + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', os.path.join(HOST_PATH, image_dir)]) + click.echo('Image removed') + + def get_binary_image_version(self, image_path): + try: + version = subprocess.check_output(['/usr/bin/unzip', '-qop', image_path, '.imagehash']) + except subprocess.CalledProcessError: + return None + return IMAGE_PREFIX + version.strip() + + def verify_binary_image(self, image_path): + try: + subprocess.check_call(['/usr/bin/unzip', '-tq', image_path]) + # TODO: secureboot check signature + except subprocess.CalledProcessError: + return False + return True + + @classmethod + def detect(cls): + with open('/proc/cmdline') as f: + return 'Aboot=' in f.read() diff --git 
a/sonic_installer/bootloader/bootloader.py b/sonic_installer/bootloader/bootloader.py new file mode 100644 index 0000000000..78bd05c61c --- /dev/null +++ b/sonic_installer/bootloader/bootloader.py @@ -0,0 +1,50 @@ +""" +Abstract Bootloader class +""" + +class Bootloader(object): + + NAME = None + DEFAULT_IMAGE_PATH = None + + def get_current_image(self): + """returns name of the current image""" + raise NotImplementedError + + def get_next_image(self): + """returns name of the next image""" + raise NotImplementedError + + def get_installed_images(self): + """returns list of installed images""" + raise NotImplementedError + + def set_default_image(self, image): + """set default image to boot from""" + raise NotImplementedError + + def set_next_image(self, image): + """set next image to boot from""" + raise NotImplementedError + + def install_image(self, image_path): + """install new image""" + raise NotImplementedError + + def remove_image(self, image): + """remove existing image""" + raise NotImplementedError + + def get_binary_image_version(self, image_path): + """returns the version of the image""" + raise NotImplementedError + + def verify_binary_image(self, image_path): + """verify that the image is supported by the bootloader""" + raise NotImplementedError + + @classmethod + def detect(cls): + """returns True if the bootloader is in use""" + return False + diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py new file mode 100644 index 0000000000..1d111f4191 --- /dev/null +++ b/sonic_installer/bootloader/grub.py @@ -0,0 +1,86 @@ +""" +Bootloader implementation for grub based platforms +""" + +import os +import re +import subprocess + +import click + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .onie import OnieInstallerBootloader + +class GrubBootloader(OnieInstallerBootloader): + + NAME = 'grub' + + def get_installed_images(self): + images = [] + config = open(HOST_PATH + 
'/grub/grub.cfg', 'r') + for line in config: + if line.startswith('menuentry'): + image = line.split()[1].strip("'") + if IMAGE_PREFIX in image: + images.append(image) + config.close() + return images + + def get_next_image(self): + images = self.get_installed_images() + grubenv = subprocess.check_output(["/usr/bin/grub-editenv", HOST_PATH + "/grub/grubenv", "list"]) + m = re.search(r"next_entry=(\d+)", grubenv) + if m: + next_image_index = int(m.group(1)) + else: + m = re.search(r"saved_entry=(\d+)", grubenv) + if m: + next_image_index = int(m.group(1)) + else: + next_image_index = 0 + return images[next_image_index] + + def set_default_image(self, image): + images = self.get_installed_images() + command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + run_command(command) + return True + + def set_next_image(self, image): + images = self.get_installed_images() + command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) + run_command(command) + return True + + def install_image(self, image_path): + run_command("bash " + image_path) + run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + + def remove_image(self, image): + click.echo('Updating GRUB...') + config = open(HOST_PATH + '/grub/grub.cfg', 'r') + old_config = config.read() + menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group() + config.close() + config = open(HOST_PATH + '/grub/grub.cfg', 'w') + # remove menuentry of the image in grub.cfg + config.write(old_config.replace(menuentry, "")) + config.close() + click.echo('Done') + + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) + click.echo('Done') + + run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + click.echo('Image removed') + + @classmethod + def detect(cls): + return os.path.isfile(os.path.join(HOST_PATH, 
'grub/grub.cfg')) diff --git a/sonic_installer/bootloader/onie.py b/sonic_installer/bootloader/onie.py new file mode 100644 index 0000000000..ca16172efa --- /dev/null +++ b/sonic_installer/bootloader/onie.py @@ -0,0 +1,48 @@ +""" +Common logic for bootloaders using an ONIE installer image +""" + +import os +import re +import signal +import subprocess + +from ..common import ( + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, +) +from .bootloader import Bootloader + +# Needed to prevent "broken pipe" error messages when piping +# output of multiple commands using subprocess.Popen() +def default_sigpipe(): + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +class OnieInstallerBootloader(Bootloader): # pylint: disable=abstract-method + + DEFAULT_IMAGE_PATH = '/tmp/sonic_image' + + def get_current_image(self): + cmdline = open('/proc/cmdline', 'r') + current = re.search(r"loop=(\S+)/fs.squashfs", cmdline.read()).group(1) + cmdline.close() + return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) + + def get_binary_image_version(self, image_path): + """returns the version of the image""" + p1 = subprocess.Popen(["cat", "-v", image_path], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + p2 = subprocess.Popen(["grep", "-m 1", "^image_version"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + p3 = subprocess.Popen(["sed", "-n", r"s/^image_version=\"\(.*\)\"$/\1/p"], stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) + + stdout = p3.communicate()[0] + p3.wait() + version_num = stdout.rstrip('\n') + + # If we didn't read a version number, this doesn't appear to be a valid SONiC image file + if not version_num: + return None + + return IMAGE_PREFIX + version_num + + def verify_binary_image(self, image_path): + return os.path.isfile(image_path) diff --git a/sonic_installer/bootloader/uboot.py b/sonic_installer/bootloader/uboot.py new file mode 100644 index 0000000000..47252dd6af --- /dev/null +++ b/sonic_installer/bootloader/uboot.py @@ -0,0 +1,83 
@@ +""" +Bootloader implementation for uboot based platforms +""" + +import platform +import subprocess + +import click + +from ..common import ( + HOST_PATH, + IMAGE_DIR_PREFIX, + IMAGE_PREFIX, + run_command, +) +from .onie import OnieInstallerBootloader + +class UbootBootloader(OnieInstallerBootloader): + + NAME = 'uboot' + + def get_installed_images(self): + images = [] + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) + proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if IMAGE_PREFIX in image: + images.append(image) + return images + + def get_next_image(self): + images = self.get_installed_images() + proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + image = out.rstrip() + if "sonic_image_2" in image: + next_image_index = 1 + else: + next_image_index = 0 + return images[next_image_index] + + def set_default_image(self, image): + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + return True + + def set_next_image(self, image): + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') + return True + + def install_image(self, image_path): + run_command("bash " + image_path) + + def remove_image(self, image): + click.echo('Updating next boot ...') + images = self.get_installed_images() + if image in images[0]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') + 
run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') + elif image in images[1]: + run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') + run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') + image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) + click.echo('Removing image root filesystem...') + subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) + click.echo('Done') + + @classmethod + def detect(cls): + arch = platform.machine() + return ("arm" in arch) or ("aarch64" in arch) diff --git a/sonic_installer/common.py b/sonic_installer/common.py new file mode 100644 index 0000000000..f12454042a --- /dev/null +++ b/sonic_installer/common.py @@ -0,0 +1,25 @@ +""" +Module holding common functions and constants used by sonic_installer and its +subpackages. +""" + +import subprocess +import sys + +import click + +HOST_PATH = '/host' +IMAGE_PREFIX = 'SONiC-OS-' +IMAGE_DIR_PREFIX = 'image-' + +# Run bash command and print output to stdout +def run_command(command): + click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) + + proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + (out, _) = proc.communicate() + + click.echo(out) + + if proc.returncode != 0: + sys.exit(proc.returncode) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index fb8179c9c6..3c68bcd843 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -1,8 +1,6 @@ #! 
/usr/bin/python -u import os -import re -import signal import sys import time import click @@ -10,45 +8,30 @@ import syslog import subprocess from swsssdk import SonicV2Connector -import collections -import platform - -HOST_PATH = '/host' -IMAGE_PREFIX = 'SONiC-OS-' -IMAGE_DIR_PREFIX = 'image-' -ONIE_DEFAULT_IMAGE_PATH = '/tmp/sonic_image' -ABOOT_DEFAULT_IMAGE_PATH = '/tmp/sonic_image.swi' -IMAGE_TYPE_ABOOT = 'aboot' -IMAGE_TYPE_ONIE = 'onie' -ABOOT_BOOT_CONFIG = '/boot-config' -BOOTLOADER_TYPE_GRUB = 'grub' -BOOTLOADER_TYPE_UBOOT = 'uboot' -ARCH = platform.machine() -BOOTLOADER = BOOTLOADER_TYPE_UBOOT if ("arm" in ARCH) or ("aarch64" in ARCH) else BOOTLOADER_TYPE_GRUB + +from .bootloader import get_bootloader +from .common import run_command # # Helper functions # -# Needed to prevent "broken pipe" error messages when piping -# output of multiple commands using subprocess.Popen() -def default_sigpipe(): - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - +_start_time = None +_last_time = None def reporthook(count, block_size, total_size): - global start_time, last_time + global _start_time, _last_time cur_time = int(time.time()) if count == 0: - start_time = cur_time - last_time = cur_time + _start_time = cur_time + _last_time = cur_time return - if cur_time == last_time: + if cur_time == _last_time: return - last_time = cur_time + _last_time = cur_time - duration = cur_time - start_time + duration = cur_time - _start_time progress_size = int(count * block_size) speed = int(progress_size / (1024 * duration)) percent = int(count * block_size * 100 / total_size) @@ -57,226 +40,13 @@ def reporthook(count, block_size, total_size): (percent, progress_size / (1024 * 1024), speed, time_left)) sys.stdout.flush() -def get_running_image_type(): - """ Attempt to determine whether we are running an ONIE or Aboot image """ - cmdline = open('/proc/cmdline', 'r') - if "Aboot=" in cmdline.read(): - return IMAGE_TYPE_ABOOT - return IMAGE_TYPE_ONIE - -# Returns None if image doesn't 
exist or isn't a regular file -def get_binary_image_type(binary_image_path): - """ Attempt to determine whether this is an ONIE or Aboot image file """ - if not os.path.isfile(binary_image_path): - return None - - with open(binary_image_path) as f: - # Aboot file is a zip archive; check the start of the file for the zip magic number - if f.read(4) == "\x50\x4b\x03\x04": - return IMAGE_TYPE_ABOOT - return IMAGE_TYPE_ONIE - -# Returns None if image doesn't exist or doesn't appear to be a valid SONiC image file -def get_binary_image_version(binary_image_path): - binary_type = get_binary_image_type(binary_image_path) - if not binary_type: - return None - elif binary_type == IMAGE_TYPE_ABOOT: - p1 = subprocess.Popen(["unzip", "-p", binary_image_path, "boot0"], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p2 = subprocess.Popen(["grep", "-m 1", "^image_name"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p3 = subprocess.Popen(["sed", "-n", r"s/^image_name=\"\image-\(.*\)\"$/\1/p"], stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - else: - p1 = subprocess.Popen(["cat", "-v", binary_image_path], stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p2 = subprocess.Popen(["grep", "-m 1", "^image_version"], stdin=p1.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - p3 = subprocess.Popen(["sed", "-n", r"s/^image_version=\"\(.*\)\"$/\1/p"], stdin=p2.stdout, stdout=subprocess.PIPE, preexec_fn=default_sigpipe) - - stdout = p3.communicate()[0] - p3.wait() - version_num = stdout.rstrip('\n') - - # If we didn't read a version number, this doesn't appear to be a valid SONiC image file - if len(version_num) == 0: - return None - - return IMAGE_PREFIX + version_num - -# Sets specified image as default image to boot from -def set_default_image(image): - images = get_installed_images() - if image not in images: - return False - - if get_running_image_type() == IMAGE_TYPE_ABOOT: - image_path = aboot_image_path(image) - 
aboot_boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - command = 'grub-set-default --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) - run_command(command) - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') - - return True - -def aboot_read_boot_config(path): - config = collections.OrderedDict() - with open(path) as f: - for line in f.readlines(): - line = line.strip() - if not line or line.startswith('#') or '=' not in line: - continue - key, value = line.split('=', 1) - config[key] = value - return config - -def aboot_write_boot_config(path, config): - with open(path, 'w') as f: - f.write(''.join( '%s=%s\n' % (k, v) for k, v in config.items())) - -def aboot_boot_config_set(**kwargs): - path = kwargs.get('path', HOST_PATH + ABOOT_BOOT_CONFIG) - config = aboot_read_boot_config(path) - for key, value in kwargs.items(): - config[key] = value - aboot_write_boot_config(path, config) - -def aboot_image_path(image): - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - return 'flash:%s/.sonic-boot.swi' % image_dir - -# Run bash command and print output to stdout -def run_command(command): - click.echo(click.style("Command: ", fg='cyan') + click.style(command, fg='green')) - - proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - - click.echo(out) - - if proc.returncode != 0: - sys.exit(proc.returncode) - -# Returns list of installed images -def get_installed_images(): - images = [] - if get_running_image_type() == IMAGE_TYPE_ABOOT: - for filename in os.listdir(HOST_PATH): - if filename.startswith(IMAGE_DIR_PREFIX): - images.append(filename.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX)) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - config = open(HOST_PATH + '/grub/grub.cfg', 'r') - 
for line in config: - if line.startswith('menuentry'): - image = line.split()[1].strip("'") - if IMAGE_PREFIX in image: - images.append(image) - config.close() - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_1", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if IMAGE_PREFIX in image: - images.append(image) - proc = subprocess.Popen("/usr/bin/fw_printenv -n sonic_version_2", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if IMAGE_PREFIX in image: - images.append(image) - return images - -# Returns name of current image -def get_current_image(): - cmdline = open('/proc/cmdline', 'r') - current = re.search("loop=(\S+)/fs.squashfs", cmdline.read()).group(1) - cmdline.close() - return current.replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) - -# Returns name of next boot image -def get_next_image(): - if get_running_image_type() == IMAGE_TYPE_ABOOT: - config = open(HOST_PATH + ABOOT_BOOT_CONFIG, 'r') - next_image = re.search("SWI=flash:(\S+)/", config.read()).group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX) - config.close() - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - images = get_installed_images() - grubenv = subprocess.check_output(["/usr/bin/grub-editenv", HOST_PATH + "/grub/grubenv", "list"]) - m = re.search("next_entry=(\d+)", grubenv) - if m: - next_image_index = int(m.group(1)) - else: - m = re.search("saved_entry=(\d+)", grubenv) - if m: - next_image_index = int(m.group(1)) - else: - next_image_index = 0 - next_image = images[next_image_index] - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - images = get_installed_images() - proc = subprocess.Popen("/usr/bin/fw_printenv -n boot_next", shell=True, stdout=subprocess.PIPE) - (out, err) = proc.communicate() - image = out.rstrip() - if "sonic_image_2" in image: - next_image_index = 1 - else: - next_image_index = 0 - next_image = images[next_image_index] - return next_image 
- -def remove_image(image): - if get_running_image_type() == IMAGE_TYPE_ABOOT: - nextimage = get_next_image() - current = get_current_image() - if image == nextimage: - image_path = aboot_image_path(current) - aboot_boot_config_set(SWI=image_path, SWI_DEFAULT=image_path) - click.echo("Set next and default boot to current image %s" % current) - - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', os.path.join(HOST_PATH, image_dir)]) - click.echo('Image removed') - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - click.echo('Updating GRUB...') - config = open(HOST_PATH + '/grub/grub.cfg', 'r') - old_config = config.read() - menuentry = re.search("menuentry '" + image + "[^}]*}", old_config).group() - config.close() - config = open(HOST_PATH + '/grub/grub.cfg', 'w') - # remove menuentry of the image in grub.cfg - config.write(old_config.replace(menuentry, "")) - config.close() - click.echo('Done') - - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) - click.echo('Done') - - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') - click.echo('Image removed') - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - click.echo('Updating next boot ...') - images = get_installed_images() - if image in images[0]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_2"') - run_command('/usr/bin/fw_setenv sonic_version_1 "NONE"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_next "run sonic_image_1"') - run_command('/usr/bin/fw_setenv sonic_version_2 "NONE"') - image_dir = image.replace(IMAGE_PREFIX, IMAGE_DIR_PREFIX) - click.echo('Removing image root filesystem...') - subprocess.call(['rm','-rf', HOST_PATH + '/' + image_dir]) - click.echo('Done') - # TODO: Embed tag name info into docker image meta data at build time, # and extract tag name from 
docker image file. def get_docker_tag_name(image): # Try to get tag name from label metadata cmd = "docker inspect --format '{{.ContainerConfig.Labels.Tag}}' " + image proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - (out, err) = proc.communicate() + (out, _) = proc.communicate() if proc.returncode != 0: return "unknown" tag = out.rstrip() @@ -292,7 +62,7 @@ def validate_url_or_abort(url): urlfile = urllib.urlopen(url) response_code = urlfile.getcode() urlfile.close() - except IOError, err: + except IOError: response_code = None if not response_code: @@ -313,7 +83,7 @@ def get_container_image_name(container_name): # example image: docker-lldp-sv2:latest cmd = "docker inspect --format '{{.Config.Image}}' " + container_name proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) - (out, err) = proc.communicate() + (out, _) = proc.communicate() if proc.returncode != 0: sys.exit(proc.returncode) image_latest = out.rstrip() @@ -374,52 +144,42 @@ def cli(): @click.argument('url') def install(url, force, skip_migration=False): """ Install image from local binary or URL""" - if get_running_image_type() == IMAGE_TYPE_ABOOT: - DEFAULT_IMAGE_PATH = ABOOT_DEFAULT_IMAGE_PATH - else: - DEFAULT_IMAGE_PATH = ONIE_DEFAULT_IMAGE_PATH + bootloader = get_bootloader() if url.startswith('http://') or url.startswith('https://'): click.echo('Downloading image...') validate_url_or_abort(url) try: - urllib.urlretrieve(url, DEFAULT_IMAGE_PATH, reporthook) + urllib.urlretrieve(url, bootloader.DEFAULT_IMAGE_PATH, reporthook) + click.echo('') except Exception as e: click.echo("Download error", e) raise click.Abort() - image_path = DEFAULT_IMAGE_PATH + image_path = bootloader.DEFAULT_IMAGE_PATH else: image_path = os.path.join("./", url) - running_image_type = get_running_image_type() - binary_image_type = get_binary_image_type(image_path) - binary_image_version = get_binary_image_version(image_path) - if not binary_image_type or not binary_image_version: + 
binary_image_version = bootloader.get_binary_image_version(image_path) + if not binary_image_version: click.echo("Image file does not exist or is not a valid SONiC image file") raise click.Abort() # Is this version already installed? - if binary_image_version in get_installed_images(): + if binary_image_version in bootloader.get_installed_images(): click.echo("Image {} is already installed. Setting it as default...".format(binary_image_version)) - if not set_default_image(binary_image_version): + if not bootloader.set_default_image(binary_image_version): click.echo('Error: Failed to set image as default') raise click.Abort() else: # Verify that the binary image is of the same type as the running image - if (binary_image_type != running_image_type) and not force: - click.echo("Image file '{}' is of a different type than running image.\n" + - "If you are sure you want to install this image, use -f|--force.\n" + + if not bootloader.verify_binary_image(image_path) and not force: + click.echo("Image file '{}' is of a different type than running image.\n" + "If you are sure you want to install this image, use -f|--force.\n" "Aborting...".format(image_path)) raise click.Abort() click.echo("Installing image {} and setting it as default...".format(binary_image_version)) - if running_image_type == IMAGE_TYPE_ABOOT: - run_command("/usr/bin/unzip -od /tmp %s boot0" % image_path) - run_command("swipath=%s target_path=/host sonic_upgrade=1 . 
/tmp/boot0" % image_path) - else: - run_command("bash " + image_path) - if BOOTLOADER == BOOTLOADER_TYPE_GRUB: - run_command('grub-set-default --boot-directory=' + HOST_PATH + ' 0') + bootloader.install_image(image_path) # Take a backup of current configuration if skip_migration: click.echo("Skipping configuration migration as requested in the command option.") @@ -433,12 +193,13 @@ def install(url, force, skip_migration=False): # List installed images -@cli.command() -def list(): +@cli.command('list') +def list_command(): """ Print installed images """ - images = get_installed_images() - curimage = get_current_image() - nextimage = get_next_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + curimage = bootloader.get_current_image() + nextimage = bootloader.get_next_image() click.echo("Current: " + curimage) click.echo("Next: " + nextimage) click.echo("Available: ") @@ -450,32 +211,22 @@ def list(): @click.argument('image') def set_default(image): """ Choose image to boot from by default """ - if not set_default_image(image): + bootloader = get_bootloader() + if image not in bootloader.get_installed_images(): click.echo('Error: Image does not exist') raise click.Abort() - + bootloader.set_default_image(image) # Set image for next boot @cli.command('set_next_boot') @click.argument('image') def set_next_boot(image): """ Choose image for next reboot (one time action) """ - images = get_installed_images() - if image not in images: - click.echo('Image does not exist') + bootloader = get_bootloader() + if image not in bootloader.get_installed_images(): + click.echo('Error: Image does not exist') sys.exit(1) - if get_running_image_type() == IMAGE_TYPE_ABOOT: - image_path = aboot_image_path(image) - aboot_boot_config_set(SWI=image_path) - elif BOOTLOADER == BOOTLOADER_TYPE_GRUB: - command = 'grub-reboot --boot-directory=' + HOST_PATH + ' ' + str(images.index(image)) - run_command(command) - elif BOOTLOADER == BOOTLOADER_TYPE_UBOOT: - if 
image in images[0]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_1"') - elif image in images[1]: - run_command('/usr/bin/fw_setenv boot_once "run sonic_image_2"') - + bootloader.set_next_image(image) # Uninstall image @cli.command() @@ -484,28 +235,30 @@ def set_next_boot(image): @click.argument('image') def remove(image): """ Uninstall image """ - images = get_installed_images() - current = get_current_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + current = bootloader.get_current_image() if image not in images: click.echo('Image does not exist') sys.exit(1) if image == current: click.echo('Cannot remove current image') sys.exit(1) - - remove_image(image) + # TODO: check if image is next boot or default boot and fix these + bootloader.remove_image(image) # Retrieve version from binary image file and print to screen @cli.command('binary_version') @click.argument('binary_image_path') def binary_version(binary_image_path): """ Get version from local binary image file """ - binary_version = get_binary_image_version(binary_image_path) - if not binary_version: + bootloader = get_bootloader() + version = bootloader.get_binary_image_version(binary_image_path) + if not version: click.echo("Image file does not exist or is not a valid SONiC image file") sys.exit(1) else: - click.echo(binary_version) + click.echo(version) # Remove installed images which are not current and next @cli.command() @@ -513,14 +266,15 @@ def binary_version(binary_image_path): expose_value=False, prompt='Remove images which are not current and next, continue?') def cleanup(): """ Remove installed images which are not current and next """ - images = get_installed_images() - curimage = get_current_image() - nextimage = get_next_image() + bootloader = get_bootloader() + images = bootloader.get_installed_images() + curimage = bootloader.get_current_image() + nextimage = bootloader.get_next_image() image_removed = 0 for image in images: if image != 
curimage and image != nextimage: click.echo("Removing image %s" % image) - remove_image(image) + bootloader.remove_image(image) image_removed += 1 if image_removed == 0: From c4a564bed6bb71b37c6fcf74b33ce3fb6edc7c1b Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sat, 27 Jun 2020 10:59:56 -0700 Subject: [PATCH 099/111] [config] Fix indentation level in _get_disabled_services_list() (#965) If FEATURE table did not exist,` config load/reload/reload_minigraph` commands would crash similar to the following: ``` Executing reset-failed of service nat... Executing reset-failed of service sflow... Traceback (most recent call last): File "/usr/bin/config", line 12, in sys.exit(config()) File "/usr/lib/python2.7/dist-packages/click/core.py", line 764, in __call__ return self.main(*args, **kwargs) File "/usr/lib/python2.7/dist-packages/click/core.py", line 717, in main rv = self.invoke(ctx) File "/usr/lib/python2.7/dist-packages/click/core.py", line 1137, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/lib/python2.7/dist-packages/click/core.py", line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/lib/python2.7/dist-packages/click/core.py", line 555, in invoke return callback(*args, **kwargs) File "/usr/lib/python2.7/dist-packages/config/main.py", line 862, in reload _restart_services() File "/usr/lib/python2.7/dist-packages/config/main.py", line 585, in _restart_services disable_services = _get_disabled_services_list() File "/usr/lib/python2.7/dist-packages/config/main.py", line 518, in _get_disabled_services_list if status == "disabled": UnboundLocalError: local variable 'status' referenced before assignment ``` --- config/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 731043a668..ca95cdc631 100755 --- a/config/main.py +++ b/config/main.py @@ -515,8 +515,8 @@ def _get_disabled_services_list(): log_warning("Status of feature '{}' is 
None".format(feature_name)) continue - if status == "disabled": - disabled_services_list.append(feature_name) + if status == "disabled": + disabled_services_list.append(feature_name) else: log_warning("Unable to retreive FEATURE table") From 40d4667f00819fc6743f5f574ae5755b214a3a88 Mon Sep 17 00:00:00 2001 From: Joe LeVeque Date: Sat, 27 Jun 2020 11:00:21 -0700 Subject: [PATCH 100/111] [config] Fix syntax error (#966) Parenthesize multiple exception types. Otherwise Python2 will incorrectly interpret the last exception type as the target. --- config/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/main.py b/config/main.py index ca95cdc631..3b92f666d2 100755 --- a/config/main.py +++ b/config/main.py @@ -114,7 +114,7 @@ def get_command(self, ctx, cmd_name): try: version_info = sonic_device_util.get_sonic_version_info() asic_type = version_info['asic_type'] -except KeyError, TypeError: +except (KeyError, TypeError): raise click.Abort() # From 347de126994dcf4c1032014aa889bdb78f4e2094 Mon Sep 17 00:00:00 2001 From: xumia <59720581+xumia@users.noreply.github.com> Date: Tue, 30 Jun 2020 17:29:51 +0800 Subject: [PATCH 101/111] Suppport to verify aboot swi image for secure boot (#969) * Suppport to verify aboot swi image for secure boot * Simplify the code * Fix not return value bug * Add m2crypto to setup.py * Change to only verify the image signed by a correct certificate --- setup.py | 3 +- sonic_installer/bootloader/aboot.py | 50 ++++++++++++++++++++++++++++- 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index adbcc2c992..8bccb843f1 100644 --- a/setup.py +++ b/setup.py @@ -149,7 +149,8 @@ # - tabulate install_requires=[ 'click', - 'natsort' + 'natsort', + 'm2crypto' ], setup_requires= [ 'pytest-runner' diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index b7c1a061ae..1933921512 100644 --- a/sonic_installer/bootloader/aboot.py +++ 
b/sonic_installer/bootloader/aboot.py @@ -2,13 +2,18 @@ Bootloader implementation for Aboot used on Arista devices """ +import base64 import collections import os import re import subprocess +import sys +import zipfile import click +from M2Crypto import X509 + from ..common import ( HOST_PATH, IMAGE_DIR_PREFIX, @@ -18,6 +23,12 @@ from .bootloader import Bootloader _secureboot = None + +# For the signature format, see: https://github.com/aristanetworks/swi-tools/tree/master/switools +SWI_SIG_FILE_NAME = 'swi-signature' +SWIX_SIG_FILE_NAME = 'swix-signature' +ISSUERCERT = 'IssuerCert' + def isSecureboot(): global _secureboot if _secureboot is None: @@ -114,11 +125,48 @@ def get_binary_image_version(self, image_path): def verify_binary_image(self, image_path): try: subprocess.check_call(['/usr/bin/unzip', '-tq', image_path]) - # TODO: secureboot check signature + return self._verify_secureboot_image(image_path) except subprocess.CalledProcessError: return False + + def _verify_secureboot_image(self, image_path): + if isSecureboot(): + cert = self.getCert(image_path) + return cert is not None return True + @classmethod + def getCert(cls, swiFile): + with zipfile.ZipFile(swiFile, 'r') as swi: + try: + sigInfo = swi.getinfo(cls.getSigFileName(swiFile)) + except KeyError: + # Occurs if SIG_FILE_NAME is not in the swi (the SWI is not signed properly) + return None + with swi.open(sigInfo, 'r') as sigFile: + for line in sigFile: + data = line.split(':') + if len(data) == 2: + if data[0] == ISSUERCERT: + try: + base64_cert = cls.base64Decode(data[1].strip()) + return X509.load_cert_string(base64_cert) + except TypeError: + return None + else: + sys.stderr.write('Unexpected format for line in swi[x]-signature file: %s\n' % line) + return None + + @classmethod + def getSigFileName(cls, swiFile): + if swiFile.lower().endswith(".swix"): + return SWIX_SIG_FILE_NAME + return SWI_SIG_FILE_NAME + + @classmethod + def base64Decode(cls, text): + return 
base64.standard_b64decode(text) + @classmethod def detect(cls): with open('/proc/cmdline') as f: From bb08bf32435802cd7cb0d8737d370469b57c8f5b Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Tue, 30 Jun 2020 17:31:07 -0700 Subject: [PATCH 102/111] Append default mask field if user has not provided one (#972) * Append mask if user has not provided one --- config/main.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/config/main.py b/config/main.py index 3b92f666d2..9786ba9e4d 100755 --- a/config/main.py +++ b/config/main.py @@ -2045,7 +2045,9 @@ def add(ctx, interface_name, ip_addr, gw): ctx.fail("'interface_name' is None!") try: - ipaddress.ip_network(unicode(ip_addr), strict=False) + net = ipaddress.ip_network(unicode(ip_addr), strict=False) + if '/' not in ip_addr: + ip_addr = str(net) if interface_name == 'eth0': @@ -2102,7 +2104,9 @@ def remove(ctx, interface_name, ip_addr): ctx.fail("'interface_name' is None!") try: - ipaddress.ip_network(unicode(ip_addr), strict=False) + net = ipaddress.ip_network(unicode(ip_addr), strict=False) + if '/' not in ip_addr: + ip_addr = str(net) if interface_name == 'eth0': config_db.set_entry("MGMT_INTERFACE", (interface_name, ip_addr), None) From 90dff47b3622b74f4ae9959c5a529e8343519069 Mon Sep 17 00:00:00 2001 From: rupesh-k <53595165+rupesh-k@users.noreply.github.com> Date: Thu, 2 Jul 2020 00:56:31 +0530 Subject: [PATCH 103/111] Add support for port mirroring CLIs (#936) * Add support for port mirroring CLIs Signed-off-by: Rupesh Kumar --- acl_loader/main.py | 29 +++-- config/main.py | 231 ++++++++++++++++++++++++++++++++++++--- doc/Command-Reference.md | 76 +++++++++++-- 3 files changed, 301 insertions(+), 35 deletions(-) diff --git a/acl_loader/main.py b/acl_loader/main.py index 7b7e480f6b..c719100fd9 100644 --- a/acl_loader/main.py +++ b/acl_loader/main.py @@ -718,21 +718,30 @@ def show_session(self, session_name): :param session_name: Optional. Mirror session name. 
Filter sessions by specified name. :return: """ - header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", "Policer", "Monitor Port") + erspan_header = ("Name", "Status", "SRC IP", "DST IP", "GRE", "DSCP", "TTL", "Queue", + "Policer", "Monitor Port", "SRC Port", "Direction") + span_header = ("Name", "Status", "DST Port", "SRC Port", "Direction", "Queue", "Policer") - data = [] + erspan_data = [] + span_data = [] for key, val in self.get_sessions_db_info().iteritems(): if session_name and key != session_name: continue - # For multi-mpu platform status and monitor port will be dict() - # of 'asic-x':value - data.append([key, val["status"], val["src_ip"], val["dst_ip"], - val.get("gre_type", ""), val.get("dscp", ""), - val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), - val.get("monitor_port", "")]) - - print(tabulate.tabulate(data, headers=header, tablefmt="simple", missingval="")) + if val.get("type") == "SPAN": + span_data.append([key, val.get("status", ""), val.get("dst_port", ""), + val.get("src_port", ""), val.get("direction", "").lower(), + val.get("queue", ""), val.get("policer", "")]) + else: + erspan_data.append([key, val.get("status", ""), val.get("src_ip", ""), + val.get("dst_ip", ""), val.get("gre_type", ""), val.get("dscp", ""), + val.get("ttl", ""), val.get("queue", ""), val.get("policer", ""), + val.get("monitor_port", ""), val.get("src_port", ""), val.get("direction", "").lower()]) + + print("ERSPAN Sessions") + print(tabulate.tabulate(erspan_data, headers=erspan_header, tablefmt="simple", missingval="")) + print("\nSPAN Sessions") + print(tabulate.tabulate(span_data, headers=span_header, tablefmt="simple", missingval="")) def show_policer(self, policer_name): """ diff --git a/config/main.py b/config/main.py index 9786ba9e4d..bd37962005 100755 --- a/config/main.py +++ b/config/main.py @@ -604,6 +604,99 @@ def is_ipaddress(val): return False return True +def interface_is_in_vlan(vlan_member_table, interface_name): 
+ """ Check if an interface is in a vlan """ + for _,intf in vlan_member_table.keys(): + if intf == interface_name: + return True + + return False + +def interface_is_in_portchannel(portchannel_member_table, interface_name): + """ Check if an interface is part of portchannel """ + for _,intf in portchannel_member_table.keys(): + if intf == interface_name: + return True + + return False + +def interface_is_router_port(interface_table, interface_name): + """ Check if an interface has router config """ + for intf in interface_table.keys(): + if (interface_name == intf[0]): + return True + + return False + +def interface_is_mirror_dst_port(config_db, interface_name): + """ Check if port is already configured as mirror destination port """ + mirror_table = config_db.get_table('MIRROR_SESSION') + for _,v in mirror_table.items(): + if 'dst_port' in v and v['dst_port'] == interface_name: + return True + + return False + +def interface_has_mirror_config(mirror_table, interface_name): + """ Check if port is already configured with mirror config """ + for _,v in mirror_table.items(): + if 'src_port' in v and v['src_port'] == interface_name: + return True + if 'dst_port' in v and v['dst_port'] == interface_name: + return True + + return False + +def validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction): + """ Check if SPAN mirror-session config is valid """ + if len(config_db.get_entry('MIRROR_SESSION', session_name)) != 0: + click.echo("Error: {} already exists".format(session_name)) + return False + + vlan_member_table = config_db.get_table('VLAN_MEMBER') + mirror_table = config_db.get_table('MIRROR_SESSION') + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') + interface_table = config_db.get_table('INTERFACE') + + if dst_port: + if not interface_name_is_valid(dst_port): + click.echo("Error: Destination Interface {} is invalid".format(dst_port)) + return False + + if interface_is_in_vlan(vlan_member_table, dst_port): + 
click.echo("Error: Destination Interface {} has vlan config".format(dst_port)) + return False + + if interface_has_mirror_config(mirror_table, dst_port): + click.echo("Error: Destination Interface {} already has mirror config".format(dst_port)) + return False + + if interface_is_in_portchannel(portchannel_member_table, dst_port): + click.echo("Error: Destination Interface {} has portchannel config".format(dst_port)) + return False + + if interface_is_router_port(interface_table, dst_port): + click.echo("Error: Destination Interface {} is a L3 interface".format(dst_port)) + return False + + if src_port: + for port in src_port.split(","): + if not interface_name_is_valid(port): + click.echo("Error: Source Interface {} is invalid".format(port)) + return False + if dst_port and dst_port == port: + click.echo("Error: Destination Interface cant be same as Source Interface") + return False + if interface_has_mirror_config(mirror_table, port): + click.echo("Error: Source Interface {} already has mirror config".format(port)) + return False + + if direction: + if direction not in ['rx', 'tx', 'both']: + click.echo("Error: Direction {} is invalid".format(direction)) + return False + + return True # This is our main entrypoint - the main 'config' command @click.group(cls=AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @@ -1030,6 +1123,8 @@ def portchannel_member(ctx): def add_portchannel_member(ctx, portchannel_name, port_name): """Add member to port channel""" db = ctx.obj['db'] + if interface_is_mirror_dst_port(db, port_name): + ctx.fail("{} is configured as mirror destination port".format(port_name)) db.set_entry('PORTCHANNEL_MEMBER', (portchannel_name, port_name), {'NULL': 'NULL'}) @@ -1051,7 +1146,11 @@ def del_portchannel_member(ctx, portchannel_name, port_name): def mirror_session(): pass -@mirror_session.command() +# +# 'add' subgroup ('config mirror_session add ...') +# + +@mirror_session.command('add') @click.argument('session_name', metavar='', required=True) 
@click.argument('src_ip', metavar='', required=True) @click.argument('dst_ip', metavar='', required=True) @@ -1061,25 +1160,70 @@ def mirror_session(): @click.argument('queue', metavar='[queue]', required=False) @click.option('--policer') def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): - """ - Add mirror session - """ + """ Add ERSPAN mirror session.(Legacy support) """ + add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer) + +@mirror_session.group(cls=AbbreviationGroup, name='erspan') +@click.pass_context +def erspan(ctx): + """ ERSPAN mirror_session """ + pass + + +# +# 'add' subcommand +# + +@erspan.command('add') +@click.argument('session_name', metavar='', required=True) +@click.argument('src_ip', metavar='', required=True) +@click.argument('dst_ip', metavar='', required=True) +@click.argument('dscp', metavar='', required=True) +@click.argument('ttl', metavar='', required=True) +@click.argument('gre_type', metavar='[gre_type]', required=False) +@click.argument('queue', metavar='[queue]', required=False) +@click.argument('src_port', metavar='[src_port]', required=False) +@click.argument('direction', metavar='[direction]', required=False) +@click.option('--policer') +def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction): + """ Add ERSPAN mirror session """ + add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port, direction) + +def gather_session_info(session_info, policer, queue, src_port, direction): + if policer: + session_info['policer'] = policer + + if queue: + session_info['queue'] = queue + + if src_port: + if get_interface_naming_mode() == "alias": + src_port_list = [] + for port in src_port.split(","): + src_port_list.append(interface_alias_to_name(port)) + src_port=",".join(src_port_list) + + session_info['src_port'] = src_port + if not direction: + direction = "both" + session_info['direction'] = direction.upper() + + return 
session_info + +def add_erspan(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer, src_port=None, direction=None): session_info = { + "type" : "ERSPAN", "src_ip": src_ip, "dst_ip": dst_ip, "dscp": dscp, "ttl": ttl } - if policer is not None: - session_info['policer'] = policer - - if gre_type is not None: + if gre_type: session_info['gre_type'] = gre_type - if queue is not None: - session_info['queue'] = queue - + session_info = gather_session_info(session_info, policer, queue, src_port, direction) + """ For multi-npu platforms we need to program all front asic namespaces """ @@ -1087,20 +1231,73 @@ def add(session_name, src_ip, dst_ip, dscp, ttl, gre_type, queue, policer): if not namespaces['front_ns']: config_db = ConfigDBConnector() config_db.connect() + if validate_mirror_session_config(config_db, session_name, None, src_port, direction) is False: + return config_db.set_entry("MIRROR_SESSION", session_name, session_info) else: per_npu_configdb = {} for front_asic_namespaces in namespaces['front_ns']: per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, None, src_port, direction) is False: + return per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) -@mirror_session.command() +@mirror_session.group(cls=AbbreviationGroup, name='span') +@click.pass_context +def span(ctx): + """ SPAN mirror session """ + pass + +@span.command('add') @click.argument('session_name', metavar='', required=True) -def remove(session_name): +@click.argument('dst_port', metavar='', required=True) +@click.argument('src_port', metavar='[src_port]', required=False) +@click.argument('direction', metavar='[direction]', required=False) +@click.argument('queue', metavar='[queue]', required=False) +@click.option('--policer') +def 
add(session_name, dst_port, src_port, direction, queue, policer): + """ Add SPAN mirror session """ + add_span(session_name, dst_port, src_port, direction, queue, policer) + +def add_span(session_name, dst_port, src_port, direction, queue, policer): + if get_interface_naming_mode() == "alias": + dst_port = interface_alias_to_name(dst_port) + if dst_port is None: + click.echo("Error: Destination Interface {} is invalid".format(dst_port)) + return + + session_info = { + "type" : "SPAN", + "dst_port": dst_port, + } + + session_info = gather_session_info(session_info, policer, queue, src_port, direction) + """ - Delete mirror session + For multi-npu platforms we need to program all front asic namespaces """ + namespaces = sonic_device_util.get_all_namespaces() + if not namespaces['front_ns']: + config_db = ConfigDBConnector() + config_db.connect() + if validate_mirror_session_config(config_db, session_name, dst_port, src_port, direction) is False: + return + config_db.set_entry("MIRROR_SESSION", session_name, session_info) + else: + per_npu_configdb = {} + for front_asic_namespaces in namespaces['front_ns']: + per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) + per_npu_configdb[front_asic_namespaces].connect() + if validate_mirror_session_config(per_npu_configdb[front_asic_namespaces], session_name, dst_port, src_port, direction) is False: + return + per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, session_info) + + +@mirror_session.command() +@click.argument('session_name', metavar='', required=True) +def remove(session_name): + """ Delete mirror session """ """ For multi-npu platforms we need to program all front asic namespaces @@ -1116,6 +1313,7 @@ def remove(session_name): per_npu_configdb[front_asic_namespaces] = ConfigDBConnector(use_unix_socket_path=True, namespace=front_asic_namespaces) per_npu_configdb[front_asic_namespaces].connect() 
per_npu_configdb[front_asic_namespaces].set_entry("MIRROR_SESSION", session_name, None) + # # 'pfcwd' group ('config pfcwd ...') # @@ -1390,6 +1588,9 @@ def add_vlan_member(ctx, vid, interface_name, untagged): if len(vlan) == 0: ctx.fail("{} doesn't exist".format(vlan_name)) + if interface_is_mirror_dst_port(db, interface_name): + ctx.fail("{} is configured as mirror destination port".format(interface_name)) + members = vlan.get('members', []) if interface_name in members: if get_interface_naming_mode() == "alias": @@ -1404,7 +1605,7 @@ def add_vlan_member(ctx, vid, interface_name, untagged): for entry in interface_table: if (interface_name == entry[0]): ctx.fail("{} is a L3 interface!".format(interface_name)) - + members.append(interface_name) vlan['members'] = members db.set_entry('VLAN', vlan_name, vlan) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 55dfaff599..8971445418 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -3879,7 +3879,6 @@ This command deletes the SNMP Trap server IP address to which SNMP agent is expe Go Back To [Beginning of the document](#) or [Beginning of this section](#management-vrf) - ## Mirroring ### Mirroring Show commands @@ -3895,10 +3894,16 @@ This command displays all the mirror sessions that are configured. 
- Example: ``` - admin@sonic:~$ show mirror session - Name Status SRC IP DST IP GRE DSCP TTL Queue - --------- -------- --------- -------- ----- ------ ----- ------- + admin@sonic:~$ show mirror_session + ERSPAN Sessions + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + ------ -------- -------- -------- ----- ------ ----- ------- --------- -------------- ---------- ----------- everflow0 active 10.1.0.32 10.0.0.7 + + SPAN Sessions + Name Status DST Port SRC Port Direction + ------ -------- ---------- ------------- ----------- + port0 active Ethernet0 PortChannel10 rx ``` ### Mirroring Config commands @@ -3906,7 +3911,12 @@ This command displays all the mirror sessions that are configured. **config mirror_session** This command is used to add or remove mirroring sessions. Mirror session is identified by "session_name". -While adding a new session, users need to configure the following fields that are used while forwarding the mirrored packets. +This command supports configuring both SPAN/ERSPAN sessions. +In SPAN user can configure mirroring of list of source ports/LAG to destination port in ingress/egress/both directions. +In ERSPAN user can configure mirroring of list of source ports/LAG to a destination IP. +Both SPAN/ERSPAN support ACL based mirroring and can be used in ACL configurations. + +While adding a new ERSPAN session, users need to configure the following fields that are used while forwarding the mirrored packets. 1) source IP address, 2) destination IP address, @@ -3914,19 +3924,65 @@ While adding a new session, users need to configure the following fields that ar 4) TTL value 5) optional - GRE Type in case if user wants to send the packet via GRE tunnel. GRE type could be anything; it could also be left as empty; by default, it is 0x8949 for Mellanox; and 0x88be for the rest of the chips. 6) optional - Queue in which packets shall be sent out of the device. Valid values 0 to 7 for most of the devices. 
Users need to know their device and the number of queues supported in that device. +7) optional - Policer which will be used to control the rate at which frames are mirrored. +8) optional - List of source ports which can have both Ethernet and LAG ports. +9) optional - Direction - Mirror session direction when configured along with Source port. (Supported rx/tx/both. default direction is both) - Usage: + ``` + config mirror_session erspan add [gre_type] [queue] [policer ] [source-port-list] [direction] + ``` + + The following command is also supported to be backward compatible. + This command will be deprecated in future releases. ``` config mirror_session add [gre_type] [queue] ``` - Example: ``` - admin@sonic:~$ sudo config mirror_session add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 - admin@sonic:~$ show mirror_session - Name Status SRC IP DST IP GRE DSCP TTL Queue - --------- -------- ----------- ----------- ------ ------ ----- ------- - mrr_abcd inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + root@T1-2:~# config mirror_session add mrr_legacy 1.2.3.4 20.21.22.23 8 100 0x6558 0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_legacy inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + + + root@T1-2:~# config mirror_session erspan add mrr_abcd 1.2.3.4 20.21.22.23 8 100 0x6558 0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_abcd inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 + root@T1-2:~# + + root@T1-2:~# config mirror_session erspan add mrr_port 1.2.3.4 20.21.22.23 8 100 0x6558 0 Ethernet0 + root@T1-2:~# show mirror_session + Name Status SRC IP DST IP GRE DSCP TTL Queue Policer 
Monitor Port SRC Port Direction + --------- -------- -------- ----------- ------ ------ ----- ------- --------- -------------- ---------- ----------- + mrr_port inactive 1.2.3.4 20.21.22.23 0x6558 8 100 0 Ethernet0 both + root@T1-2:~# + ``` + +While adding a new SPAN session, users need to configure the following fields that are used while forwarding the mirrored packets. +1) destination port, +2) optional - List of source ports- List of source ports which can have both Ethernet and LAG ports. +3) optional - Direction - Mirror session direction when configured along with Source port. (Supported rx/tx/both. default direction is both) +4) optional - Queue in which packets shall be sent out of the device. Valid values 0 to 7 for most of the devices. Users need to know their device and the number of queues supported in that device. +5) optional - Policer which will be used to control the rate at which frames are mirrored. + +- Usage: + ``` + config mirror_session span add [source-port-list] [direction] [queue] [policer ] + ``` + +- Example: + ``` + root@T1-2:~# config mirror_session span add port0 Ethernet0 Ethernet4,PortChannel001,Ethernet8 + root@T1-2:~# show mirror_session + Name Status DST Port SRC Port Direction + ------ -------- ---------- --------------------------------- ----------- + port0 active Ethernet0 Ethernet4,PortChannel10,Ethernet8 both + root@T1-2:~# ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#mirroring) From 583043385f5c08fd647bc8b133578c05319d340f Mon Sep 17 00:00:00 2001 From: Praveen Chaudhary Date: Thu, 2 Jul 2020 16:21:25 -0700 Subject: [PATCH 104/111] [config] Add ConfigMgmt class for config validation, delete ports, add ports (#765) Provided a new ConfigMgmt class for - Config Validation - Adding ports - Deleting ports Signed-off-by: Praveen Chaudhary pchaudhary@linkedin.com --- config/config_mgmt.py | 840 ++++++++++++++++++ sonic-utilities-tests/config_mgmt_test.py | 721 +++++++++++++++ 
.../mock_tables/counters_db.json | 6 + 3 files changed, 1567 insertions(+) create mode 100644 config/config_mgmt.py create mode 100644 sonic-utilities-tests/config_mgmt_test.py diff --git a/config/config_mgmt.py b/config/config_mgmt.py new file mode 100644 index 0000000000..c9db79ea90 --- /dev/null +++ b/config/config_mgmt.py @@ -0,0 +1,840 @@ +''' +config_mgmt.py provides classes for configuration validation and for Dynamic +Port Breakout. +''' +try: + import re + import syslog + + from json import load + from time import sleep as tsleep + from imp import load_source + from jsondiff import diff + from sys import flags + + # SONiC specific imports + import sonic_yang + from swsssdk import ConfigDBConnector, SonicV2Connector, port_util + + # Using load_source to 'import /usr/local/bin/sonic-cfggen as sonic_cfggen' + # since /usr/local/bin/sonic-cfggen does not have .py extension. + load_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + from sonic_cfggen import deep_update, FormatConverter, sort_data + +except ImportError as e: + raise ImportError("%s - required module not found" % str(e)) + +# Globals +YANG_DIR = "/usr/local/yang-models" +CONFIG_DB_JSON_FILE = '/etc/sonic/confib_db.json' +# TODO: Find a place for it on sonic switch. +DEFAULT_CONFIG_DB_JSON_FILE = '/etc/sonic/port_breakout_config_db.json' + +class ConfigMgmt(): + ''' + Class to handle config managment for SONIC, this class will use sonic_yang + to verify config for the commands which are capable of change in config DB. + ''' + + def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): + ''' + Initialise the class, --read the config, --load in data tree. + + Parameters: + source (str): source for input config, default configDb else file. + debug (bool): verbose mode. + allowTablesWithoutYang (bool): allow tables without yang model in + config or not. 
+ + Returns: + void + ''' + try: + self.configdbJsonIn = None + self.configdbJsonOut = None + self.allowTablesWithoutYang = allowTablesWithoutYang + + # logging vars + self.SYSLOG_IDENTIFIER = "ConfigMgmt" + self.DEBUG = debug + + self.sy = sonic_yang.SonicYang(YANG_DIR, debug=debug) + # load yang models + self.sy.loadYangModel() + # load jIn from config DB or from config DB json file. + if source.lower() == 'configdb': + self.readConfigDB() + # treat any other source as file input + else: + self.readConfigDBJson(source) + # this will crop config, xlate and load. + self.sy.loadData(self.configdbJsonIn) + + # Raise if tables without YANG models are not allowed but exist. + if not allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(Exception('ConfigMgmt Class creation failed')) + + return + + def __del__(self): + pass + + def tablesWithoutYang(self): + ''' + Return tables loaded in config for which YANG model does not exist. + + Parameters: + void + + Returns: + tablesWithoutYang (list): list of tables. + ''' + return self.sy.tablesWithOutYang + + def loadData(self, configdbJson): + ''' + Explicit function to load config data in Yang Data Tree. + + Parameters: + configdbJson (dict): dict similar to configDb. + + Returns: + void + ''' + self.sy.loadData(configdbJson) + # Raise if tables without YANG models are not allowed but exist. + if not self.allowTablesWithoutYang and len(self.sy.tablesWithOutYang): + raise Exception('Config has tables without YANG models') + + return + + def validateConfigData(self): + ''' + Validate current config data Tree. 
+ + Parameters: + void + + Returns: + bool + ''' + try: + self.sy.validate_data_tree() + except Exception as e: + self.sysLog(msg='Data Validation Failed') + return False + + self.sysLog(msg='Data Validation successful', doPrint=True) + return True + + def sysLog(self, logLevel=syslog.LOG_INFO, msg=None, doPrint=False): + ''' + Log the msg in syslog file. + + Parameters: + debug : syslog level + msg (str): msg to be logged. + + Returns: + void + ''' + # log debug only if enabled + if self.DEBUG == False and logLevel == syslog.LOG_DEBUG: + return + if flags.interactive !=0 and doPrint == True: + print("{}".format(msg)) + syslog.openlog(self.SYSLOG_IDENTIFIER) + syslog.syslog(logLevel, msg) + syslog.closelog() + + return + + def readConfigDBJson(self, source=CONFIG_DB_JSON_FILE): + ''' + Read the config from a Config File. + + Parameters: + source(str): config file name. + + Returns: + (void) + ''' + self.sysLog(msg='Reading data from {}'.format(source)) + self.configdbJsonIn = readJsonFile(source) + #self.sysLog(msg=type(self.configdbJsonIn)) + if not self.configdbJsonIn: + raise(Exception("Can not load config from config DB json file")) + self.sysLog(msg='Reading Input {}'.format(self.configdbJsonIn)) + + return + + """ + Get config from redis config DB + """ + def readConfigDB(self): + ''' + Read the config in Config DB. Assign it in self.configdbJsonIn. + + Parameters: + (void) + + Returns: + (void) + ''' + self.sysLog(doPrint=True, msg='Reading data from Redis configDb') + # Read from config DB on sonic switch + db_kwargs = dict(); data = dict() + configdb = ConfigDBConnector(**db_kwargs) + configdb.connect() + deep_update(data, FormatConverter.db_to_output(configdb.get_config())) + self.configdbJsonIn = FormatConverter.to_serialized(data) + self.sysLog(syslog.LOG_DEBUG, 'Reading Input from ConfigDB {}'.\ + format(self.configdbJsonIn)) + + return + + def writeConfigDB(self, jDiff): + ''' + Write the diff in Config DB. 
+ + Parameters: + jDiff (dict): config to push in config DB. + + Returns: + void + ''' + self.sysLog(doPrint=True, msg='Writing in Config DB') + db_kwargs = dict(); data = dict() + configdb = ConfigDBConnector(**db_kwargs) + configdb.connect(False) + deep_update(data, FormatConverter.to_deserialized(jDiff)) + data = sort_data(data) + self.sysLog(msg="Write in DB: {}".format(data)) + configdb.mod_config(FormatConverter.output_to_db(data)) + + return + +# End of Class ConfigMgmt + +class ConfigMgmtDPB(ConfigMgmt): + ''' + Config MGMT class for Dynamic Port Breakout(DPB). This is derived from + ConfigMgmt. + ''' + + def __init__(self, source="configDB", debug=False, allowTablesWithoutYang=True): + ''' + Initialise the class + + Parameters: + source (str): source for input config, default configDb else file. + debug (bool): verbose mode. + allowTablesWithoutYang (bool): allow tables without yang model in + config or not. + + Returns: + void + ''' + try: + ConfigMgmt.__init__(self, source=source, debug=debug, \ + allowTablesWithoutYang=allowTablesWithoutYang) + self.oidKey = 'ASIC_STATE:SAI_OBJECT_TYPE_PORT:oid:0x' + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(Exception('ConfigMgmtDPB Class creation failed')) + + return + + def __del__(self): + pass + + def _checkKeyinAsicDB(self, key, db): + ''' + Check if a key exists in ASIC DB or not. + + Parameters: + db (SonicV2Connector): database. + key (str): key in ASIC DB, with table Seperator if applicable. + + Returns: + (bool): True, if given key is present. + ''' + self.sysLog(msg='Check Key in Asic DB: {}'.format(key)) + try: + # chk key in ASIC DB + if db.exists('ASIC_DB', key): + return True + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise(e) + + return False + + def _checkNoPortsInAsicDb(self, db, ports, portMap): + ''' + Check ASIC DB for PORTs in port List + + Parameters: + db (SonicV2Connector): database. 
+ ports (list): List of ports + portMap (dict): port to OID map. + + Returns: + (bool): True, if all ports are not present. + ''' + try: + # connect to ASIC DB, + db.connect(db.ASIC_DB) + for port in ports: + key = self.oidKey + portMap[port] + if self._checkKeyinAsicDB(key, db) == True: + return False + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + return False + + return True + + def _verifyAsicDB(self, db, ports, portMap, timeout): + ''' + Verify in the Asic DB that port are deleted, Keep on trying till timeout + period. + + Parameters: + db (SonicV2Connector): database. + ports (list): port list to check in ASIC DB. + portMap (dict): oid<->port map. + timeout (int): timeout period + + Returns: + (bool) + ''' + self.sysLog(doPrint=True, msg="Verify Port Deletion from Asic DB, Wait...") + try: + for waitTime in range(timeout): + self.sysLog(logLevel=syslog.LOG_DEBUG, msg='Check Asic DB: {} \ + try'.format(waitTime+1)) + # checkNoPortsInAsicDb will return True if all ports are not + # present in ASIC DB + if self._checkNoPortsInAsicDb(db, ports, portMap): + break + tsleep(1) + + # raise if timer expired + if waitTime + 1 == timeout: + self.sysLog(syslog.LOG_CRIT, "!!! Critical Failure, Ports \ + are not Deleted from ASIC DB, Bail Out !!!", doPrint=True) + raise(Exception("Ports are present in ASIC DB after {} secs".\ + format(timeout))) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return True + + def breakOutPort(self, delPorts=list(), portJson=dict(), force=False, \ + loadDefConfig=True): + ''' + This is the main function for port breakout. Exposed to caller. + + Parameters: + delPorts (list): ports to be deleted. + portJson (dict): Config DB json Part of all Ports, generated from + platform.json. + force (bool): if false return dependecies, else delete dependencies. + loadDefConfig: If loadDefConfig, add default config for ports as well. 
+ + Returns: + (deps, ret) (tuple)[list, bool]: dependecies and success/failure. + ''' + MAX_WAIT = 60 + try: + # delete Port and get the Config diff, deps and True/False + delConfigToLoad, deps, ret = self._deletePorts(ports=delPorts, \ + force=force) + # return dependencies if delete port fails + if ret == False: + return deps, ret + + # add Ports and get the config diff and True/False + addConfigtoLoad, ret = self._addPorts(portJson=portJson, \ + loadDefConfig=loadDefConfig) + # return if ret is False, Great thing, no change is done in Config + if ret == False: + return None, ret + + # Save Port OIDs Mapping Before Deleting Port + dataBase = SonicV2Connector(host="127.0.0.1") + if_name_map, if_oid_map = port_util.get_interface_oid_map(dataBase) + self.sysLog(syslog.LOG_DEBUG, 'if_name_map {}'.format(if_name_map)) + + # If we are here, then get ready to update the Config DB, Update + # deletion of Config first, then verify in Asic DB for port deletion, + # then update addition of ports in config DB. + self.writeConfigDB(delConfigToLoad) + # Verify in Asic DB, + self._verifyAsicDB(db=dataBase, ports=delPorts, portMap=if_name_map, \ + timeout=MAX_WAIT) + self.writeConfigDB(addConfigtoLoad) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + return None, False + + return None, True + + def _deletePorts(self, ports=list(), force=False): + ''' + Delete ports and dependecies from data tree, validate and return resultant + config. + + Parameters: + ports (list): list of ports + force (bool): if false return dependecies, else delete dependencies. + + Returns: + (configToLoad, deps, ret) (tuple)[dict, list, bool]: config, dependecies + and success/fail. 
+ ''' + configToLoad = None; deps = None + try: + self.sysLog(msg="delPorts ports:{} force:{}".format(ports, force)) + + self.sysLog(doPrint=True, msg='Start Port Deletion') + deps = list() + + # Get all dependecies for ports + for port in ports: + xPathPort = self.sy.findXpathPortLeaf(port) + self.sysLog(doPrint=True, msg='Find dependecies for port {}'.\ + format(port)) + dep = self.sy.find_data_dependencies(str(xPathPort)) + if dep: + deps.extend(dep) + + # No further action with no force and deps exist + if force == False and deps: + return configToLoad, deps, False; + + # delets all deps, No topological sort is needed as of now, if deletion + # of deps fails, return immediately + elif deps: + for dep in deps: + self.sysLog(msg='Deleting {}'.format(dep)) + self.sy.deleteNode(str(dep)) + # mark deps as None now, + deps = None + + # all deps are deleted now, delete all ports now + for port in ports: + xPathPort = self.sy.findXpathPort(port) + self.sysLog(doPrint=True, msg="Deleting Port: " + port) + self.sy.deleteNode(str(xPathPort)) + + # Let`s Validate the tree now + if self.validateConfigData()==False: + return configToLoad, deps, False; + + # All great if we are here, Lets get the diff + self.configdbJsonOut = self.sy.getData() + # Update configToLoad + configToLoad = self._updateDiffConfigDB() + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Port Deletion Failed") + return configToLoad, deps, False + + return configToLoad, deps, True + + def _addPorts(self, portJson=dict(), loadDefConfig=True): + ''' + Add ports and default confug in data tree, validate and return resultant + config. + + Parameters: + portJson (dict): Config DB json Part of all Ports, generated from + platform.json. + loadDefConfig: If loadDefConfig, add default config for ports as well. 
+ + Returns: + (configToLoad, ret) (tuple)[dict, bool] + ''' + configToLoad = None + ports = portJson['PORT'].keys() + try: + self.sysLog(doPrint=True, msg='Start Port Addition') + self.sysLog(msg="addPorts Args portjson: {} loadDefConfig: {}".\ + format(portJson, loadDefConfig)) + + if loadDefConfig: + defConfig = self._getDefaultConfig(ports) + self.sysLog(msg='Default Config: {}'.format(defConfig)) + + # get the latest Data Tree, save this in input config, since this + # is our starting point now + self.configdbJsonIn = self.sy.getData() + + # Get the out dict as well, if not done already + if self.configdbJsonOut is None: + self.configdbJsonOut = self.sy.getData() + + # update portJson in configdbJsonOut PORT part + self.configdbJsonOut['PORT'].update(portJson['PORT']) + # merge new config with data tree, this is json level merge. + # We do not allow new table merge while adding default config. + if loadDefConfig: + self.sysLog(doPrint=True, msg="Merge Default Config for {}".\ + format(ports)) + self._mergeConfigs(self.configdbJsonOut, defConfig, True) + + # create a tree with merged config and validate, if validation is + # sucessful, then configdbJsonOut contains final and valid config. + self.sy.loadData(self.configdbJsonOut) + if self.validateConfigData()==False: + return configToLoad, False + + # All great if we are here, Let`s get the diff and update COnfig + configToLoad = self._updateDiffConfigDB() + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Port Addition Failed") + return configToLoad, False + + return configToLoad, True + + def _mergeConfigs(self, D1, D2, uniqueKeys=True): + ''' + Merge D2 dict in D1 dict, Note both first and second dict will change. + First Dict will have merged part D1 + D2. Second dict will have D2 - D1 + i.e [unique keys in D2]. Unique keys in D2 will be merged in D1 only + if uniqueKeys=True. 
+        Usage: This function can be used with 'config load' command to merge
+        new config with old.
+
+        Parameters:
+            D1 (dict): Partial Config 1.
+            D2 (dict): Partial Config 2.
+            uniqueKeys (bool)
+
+        Returns:
+            bool
+        '''
+        try:
+            def _mergeItems(it1, it2):
+                if isinstance(it1, list) and isinstance(it2, list):
+                    it1.extend(it2)
+                elif isinstance(it1, dict) and isinstance(it2, dict):
+                    self._mergeConfigs(it1, it2)
+                elif isinstance(it1, list) or isinstance(it2, list):
+                    raise Exception("Can not merge Configs, List problem")
+                elif isinstance(it1, dict) or isinstance(it2, dict):
+                    raise Exception("Can not merge Configs, Dict problem")
+                else:
+                    # First Dict takes priority
+                    pass
+                return
+
+            for it in D1.keys():
+                # D2 has the key
+                if D2.get(it):
+                    _mergeItems(D1[it], D2[it])
+                    del D2[it]
+
+            # if uniqueKeys are needed, merge rest of the keys of D2 in D1
+            if uniqueKeys:
+                D1.update(D2)
+        except Exception as e:
+            self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \
+                msg="Merge Config failed")
+            self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e))
+            raise e
+
+        return D1
+
+    def _searchKeysInConfig(self, In, Out, skeys):
+        '''
+        Search Relevant Keys in Input Config using DFS, This function is mainly
+        used to search ports related config in Default ConfigDbJson file.
+
+        Parameters:
+            In (dict): Input Config to be searched
+            skeys (list): Keys to be searched in Input Config i.e. search Keys.
+            Out (dict): Contains the search result, i.e. Output Config with skeys.
+
+        Returns:
+            found (bool): True if any of skeys is found else False.
+        '''
+        found = False
+        if isinstance(In, dict):
+            for key in In.keys():
+                for skey in skeys:
+                    # pattern is very specific to current primary keys in
+                    # config DB, may need to be updated later.
+ pattern = '^' + skey + '\|' + '|' + skey + '$' + \ + '|' + '^' + skey + '$' + reg = re.compile(pattern) + if reg.search(key): + # In primary key, only 1 match can be found, so return + Out[key] = In[key] + found = True + break + # Put the key in Out by default, if not added already. + # Remove later, if subelements does not contain any port. + if Out.get(key) is None: + Out[key] = type(In[key])() + if self._searchKeysInConfig(In[key], Out[key], skeys) == False: + del Out[key] + else: + found = True + + elif isinstance(In, list): + for skey in skeys: + if skey in In: + found = True + Out.append(skey) + + else: + # nothing for other keys + pass + + return found + + def configWithKeys(self, configIn=dict(), keys=list()): + ''' + This function returns the config with relavant keys in Input Config. + It calls _searchKeysInConfig. + + Parameters: + configIn (dict): Input Config + keys (list): Key list. + + Returns: + configOut (dict): Output Config containing only key related config. + ''' + configOut = dict() + try: + if len(configIn) and len(keys): + self._searchKeysInConfig(configIn, configOut, skeys=keys) + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="configWithKeys Failed, Error: {}".format(str(e))) + raise e + + return configOut + + def _getDefaultConfig(self, ports=list()): + ''' + Create a default Config for given Port list from Default Config File. + It calls _searchKeysInConfig. + + Parameters: + ports (list): list of ports, for which default config must be fetched. + + Returns: + defConfigOut (dict): default Config for given Ports. 
+ ''' + # function code + try: + self.sysLog(doPrint=True, msg="Generating default config for {}".format(ports)) + defConfigIn = readJsonFile(DEFAULT_CONFIG_DB_JSON_FILE) + defConfigOut = dict() + self._searchKeysInConfig(defConfigIn, defConfigOut, skeys=ports) + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="getDefaultConfig Failed, Error: {}".format(str(e))) + raise e + + return defConfigOut + + def _updateDiffConfigDB(self): + ''' + Return ConfigDb format Diff b/w self.configdbJsonIn, self.configdbJsonOut + + Parameters: + void + + Returns: + configToLoad (dict): ConfigDb format Diff + ''' + try: + # Get the Diff + self.sysLog(msg='Generate Final Config to write in DB') + configDBdiff = self._diffJson() + # Process diff and create Config which can be updated in Config DB + configToLoad = self._createConfigToLoad(configDBdiff, \ + self.configdbJsonIn, self.configdbJsonOut) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Config Diff Generation failed") + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return configToLoad + + def _createConfigToLoad(self, diff, inp, outp): + ''' + Create the config to write in Config DB, i.e. compitible with mod_config() + This functions has 3 inner functions: + -- _deleteHandler: to handle delete in diff. See example below. + -- _insertHandler: to handle insert in diff. See example below. + -- _recurCreateConfig: recursively create this config. + + Parameters: + diff: jsondiff b/w 2 configs. + Example: + {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}}, + u'Vlan777': {u'members': {insert: [(92, 'Ethernet2')]}}}, + 'PORT': {delete: {u'Ethernet1': {...}}}} + + inp: input config before delete/add ports, i.e. current config Db. + outp: output config after delete/add ports. i.e. config DB once diff + is applied. + + Returns: + configToLoad (dict): config in a format compitible with mod_Config(). 
+ ''' + + ### Internal Functions ### + def _deleteHandler(diff, inp, outp, config): + ''' + Handle deletions in diff dict + ''' + if isinstance(inp, dict): + # Example Case: diff = PORT': {delete: {u'Ethernet1': {...}}}} + for key in diff: + # make sure keys from diff are present in inp but not in outp + if key in inp and key not in outp: + # assign key to None(null), redis will delete entire key + config[key] = None + else: + # should not happen + raise Exception('Invalid deletion of {} in diff'.format(key)) + + elif isinstance(inp, list): + # Example case: {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}} + # just take list from outputs + config.extend(outp) + return + + def _insertHandler(diff, inp, outp, config): + ''' + Handle inserts in diff dict + ''' + if isinstance(outp, dict): + # Example Case: diff = PORT': {insert: {u'Ethernet1': {...}}}} + for key in diff: + # make sure keys are only in outp + if key not in inp and key in outp: + # assign key in config same as outp + config[key] = outp[key] + else: + # should not happen + raise Exception('Invalid insertion of {} in diff'.format(key)) + + elif isinstance(outp, list): + # just take list from output + # Example case: {u'VLAN': {u'Vlan100': {'members': {insert: [(95, 'Ethernet1')]}} + config.extend(outp) + return + + def _recurCreateConfig(diff, inp, outp, config): + ''' + Recursively iterate diff to generate config to write in configDB + ''' + changed = False + # updates are represented by list in diff and as dict in outp\inp + # we do not allow updates right now + if isinstance(diff, list) and isinstance(outp, dict): + return changed + + idx = -1 + for key in diff: + idx = idx + 1 + if str(key) == '$delete': + _deleteHandler(diff[key], inp, outp, config) + changed = True + elif str(key) == '$insert': + _insertHandler(diff[key], inp, outp, config) + changed = True + else: + # insert in config by default, remove later if not needed + if isinstance(diff, dict): + # config should match type 
of outp + config[key] = type(outp[key])() + if _recurCreateConfig(diff[key], inp[key], outp[key], \ + config[key]) == False: + del config[key] + else: + changed = True + elif isinstance(diff, list): + config.append(key) + if _recurCreateConfig(diff[idx], inp[idx], outp[idx], \ + config[-1]) == False: + del config[-1] + else: + changed = True + + return changed + + ### Function Code ### + try: + configToLoad = dict() + _recurCreateConfig(diff, inp, outp, configToLoad) + + except Exception as e: + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, \ + msg="Create Config to load in DB, Failed") + self.sysLog(doPrint=True, logLevel=syslog.LOG_ERR, msg=str(e)) + raise e + + return configToLoad + + def _diffJson(self): + ''' + Return json diff between self.configdbJsonIn, self.configdbJsonOut dicts. + + Parameters: + void + + Returns: + (dict): json diff between self.configdbJsonIn, self.configdbJsonOut + dicts. + Example: + {u'VLAN': {u'Vlan100': {'members': {delete: [(95, 'Ethernet1')]}}, + u'Vlan777': {u'members': {insert: [(92, 'Ethernet2')]}}}, + 'PORT': {delete: {u'Ethernet1': {...}}}} + ''' + return diff(self.configdbJsonIn, self.configdbJsonOut, syntax='symmetric') + +# end of class ConfigMgmtDPB + +# Helper Functions +def readJsonFile(fileName): + ''' + Read Json file. + + Parameters: + fileName (str): file + + Returns: + result (dict): json --> dict + ''' + try: + with open(fileName) as f: + result = load(f) + except Exception as e: + raise Exception(e) + + return result diff --git a/sonic-utilities-tests/config_mgmt_test.py b/sonic-utilities-tests/config_mgmt_test.py new file mode 100644 index 0000000000..aec7f75e30 --- /dev/null +++ b/sonic-utilities-tests/config_mgmt_test.py @@ -0,0 +1,721 @@ +import imp +import os +# import file under test i.e. 
config_mgmt.py +imp.load_source('config_mgmt', \ + os.path.join(os.path.dirname(__file__), '..', 'config', 'config_mgmt.py')) +import config_mgmt + +from unittest import TestCase +from mock import MagicMock, call +from json import dump + +class TestConfigMgmt(TestCase): + ''' + Test Class for config_mgmt.py + ''' + + def setUp(self): + config_mgmt.CONFIG_DB_JSON_FILE = "startConfigDb.json" + config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE = "portBreakOutConfigDb.json" + return + + def test_config_validation(self): + curConfig = dict(configDbJson) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cm = config_mgmt.ConfigMgmt(source=config_mgmt.CONFIG_DB_JSON_FILE) + assert cm.validateConfigData() == True + return + + def test_table_without_yang(self): + curConfig = dict(configDbJson) + unknown = {"unknown_table": {"ukey": "uvalue"}} + self.updateConfig(curConfig, unknown) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cm = config_mgmt.ConfigMgmt(source=config_mgmt.CONFIG_DB_JSON_FILE) + #assert "unknown_table" in cm.tablesWithoutYang() + return + + def test_search_keys(self): + curConfig = dict(configDbJson) + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cmdpb = config_mgmt.ConfigMgmtDPB(source=config_mgmt.CONFIG_DB_JSON_FILE) + out = cmdpb.configWithKeys(portBreakOutConfigDbJson, \ + ["Ethernet8","Ethernet9"]) + assert "VLAN" not in out.keys() + assert "INTERFACE" not in out.keys() + for k in out['ACL_TABLE'].keys(): + # only ports must be chosen + len(out['ACL_TABLE'][k]) == 1 + out = cmdpb.configWithKeys(portBreakOutConfigDbJson, \ + ["Ethernet10","Ethernet11"]) + assert "INTERFACE" in out.keys() + for k in out['ACL_TABLE'].keys(): + # only ports must be chosen + len(out['ACL_TABLE'][k]) == 1 + return + + def test_break_out(self): + # prepare default config + self.writeJson(portBreakOutConfigDbJson, \ + config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE) + # prepare config dj json to start with + curConfig = dict(configDbJson) + 
#Ethernet8: start from 4x25G-->2x50G with -f -l + self.dpb_port8_4x25G_2x50G_f_l(curConfig) + #Ethernet8: move from 2x50G-->1x100G without force, list deps + self.dpb_port8_2x50G_1x100G(curConfig) + # Ethernet8: move from 2x50G-->1x100G with force, where deps exists + self.dpb_port8_2x50G_1x100G_f(curConfig) + # Ethernet8: move from 1x100G-->4x25G without force, no deps + self.dpb_port8_1x100G_4x25G(curConfig) + # Ethernet8: move from 4x25G-->1x100G with force, no deps + self.dpb_port8_4x25G_1x100G_f(curConfig) + # Ethernet8: move from 1x100G-->1x50G(2)+2x25G(2) with -f -l, + self.dpb_port8_1x100G_1x50G_2x25G_f_l(curConfig) + # Ethernet4: breakout from 4x25G to 2x50G with -f -l + self.dpb_port4_4x25G_2x50G_f_l(curConfig) + return + + def tearDown(self): + try: + os.remove(config_mgmt.CONFIG_DB_JSON_FILE) + os.remove(config_mgmt.DEFAULT_CONFIG_DB_JSON_FILE) + except Exception as e: + pass + return + + ########### HELPER FUNCS ##################################### + def writeJson(self, d, file): + with open(file, 'w') as f: + dump(d, f, indent=4) + return + + def config_mgmt_dpb(self, curConfig): + ''' + config_mgmt.ConfigMgmtDPB class instance with mocked functions. Not using + pytest fixture, because it is used in non test funcs. + + Parameter: + curConfig (dict): Config to start with. + + Return: + cmdpb (ConfigMgmtDPB): Class instance of ConfigMgmtDPB. + ''' + # create object + self.writeJson(curConfig, config_mgmt.CONFIG_DB_JSON_FILE) + cmdpb = config_mgmt.ConfigMgmtDPB(source=config_mgmt.CONFIG_DB_JSON_FILE) + # mock funcs + cmdpb.writeConfigDB = MagicMock(return_value=True) + cmdpb._verifyAsicDB = MagicMock(return_value=True) + import mock_tables.dbconnector + return cmdpb + + def generate_args(self, portIdx, laneIdx, curMode, newMode): + ''' + Generate port to deleted, added and {lanes, speed} setting based on + current and new mode. 
+ Example: + For generate_args(8, 73, '4x25G', '2x50G'): + output: + ( + ['Ethernet8', 'Ethernet9', 'Ethernet10', 'Ethernet11'], + ['Ethernet8', 'Ethernet10'], + {'Ethernet8': {'lanes': '73,74', 'speed': '50000'}, + 'Ethernet10': {'lanes': '75,76', 'speed': '50000'}}) + + Parameters: + portIdx (int): Port Index. + laneIdx (int): Lane Index. + curMode (str): current breakout mode of Port. + newMode (str): new breakout mode of Port. + + Return: + dPorts, pJson (tuple)[list, dict] + ''' + # default params + pre = "Ethernet" + laneMap = {"4x25G": [1,1,1,1], "2x50G": [2,2], "1x100G":[4], \ + "1x50G(2)+2x25G(2)":[2,1,1], "2x25G(2)+1x50G(2)":[1,1,2]} + laneSpeed = 25000 + # generate dPorts + l = list(laneMap[curMode]); l.insert(0, 0); id = portIdx; dPorts = list() + for i in l[:-1]: + id = id + i + portName = portName = "{}{}".format(pre, id) + dPorts.append(portName) + # generate aPorts + l = list(laneMap[newMode]); l.insert(0, 0); id = portIdx; aPorts = list() + for i in l[:-1]: + id = id + i + portName = portName = "{}{}".format(pre, id) + aPorts.append(portName) + # generate pJson + l = laneMap[newMode]; pJson = {"PORT": {}}; li = laneIdx; pi = 0 + for i in l: + speed = laneSpeed*i + lanes = [str(li+j) for j in range(i)]; lanes = ','.join(lanes) + pJson['PORT'][aPorts[pi]] = {"speed": str(speed), "lanes": str(lanes)} + li = li+i; pi = pi + 1 + return dPorts, pJson + + def updateConfig(self, conf, uconf): + ''' + update the config to emulate continous breakingout a single port. + + Parameters: + conf (dict): current config in config DB. + uconf (dict): config Diff to be pushed in config DB. + + Return: + void + conf will be updated with uconf, i.e. config diff. 
+ ''' + try: + for it in uconf.keys(): + # if conf has the key + if conf.get(it): + # if marked for deletion + if uconf[it] == None: + del conf[it] + else: + if isinstance(conf[it], list) and isinstance(uconf[it], list): + conf[it] = list(uconf[it]) + elif isinstance(conf[it], dict) and isinstance(uconf[it], dict): + self.updateConfig(conf[it], uconf[it]) + else: + conf[it] = uconf[it] + del uconf[it] + # update new keys in conf + conf.update(uconf) + except Exception as e: + print("update Config failed") + print(e) + raise e + return + + def checkResult(self, cmdpb, delConfig, addConfig): + ''' + Usual result check in many test is: Make sure delConfig and addConfig is + pushed in order to configDb + + Parameters: + cmdpb (ConfigMgmtDPB): Class instance of ConfigMgmtDPB. + delConfig (dict): config Diff to be pushed in config DB while deletion + of ports. + addConfig (dict): config Diff to be pushed in config DB while addition + of ports. + + Return: + void + ''' + calls = [call(delConfig), call(addConfig)] + assert cmdpb.writeConfigDB.call_count == 2 + cmdpb.writeConfigDB.assert_has_calls(calls, any_order=False) + return + + def postUpdateConfig(self, curConfig, delConfig, addConfig): + ''' + After breakout, update the config to emulate continous breakingout a + single port. + + Parameters: + curConfig (dict): current Config in config DB. + delConfig (dict): config Diff to be pushed in config DB while deletion + of ports. + addConfig (dict): config Diff to be pushed in config DB while addition + of ports. + + Return: + void + curConfig will be updated with delConfig and addConfig. + ''' + # update the curConfig with change + self.updateConfig(curConfig, delConfig) + self.updateConfig(curConfig, addConfig) + return + + def dpb_port8_1x100G_1x50G_2x25G_f_l(self, curConfig): + ''' + Breakout Port 8 1x100G->1x50G_2x25G with -f -l + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. 
+ ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='1x100G', newMode='1x50G(2)+2x25G(2)') + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, + force=True, loadDefConfig=True) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'PORT': { + u'Ethernet8': None + } + } + addConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet4', 'Ethernet8', 'Ethernet10'] + }, + u'NO-NSW-PACL-TEST': { + u'ports': ['Ethernet11'] + } + }, + u'INTERFACE': { + u'Ethernet11|2a04:1111:40:a709::1/126': { + u'scope': u'global', + u'family': u'IPv6' + }, + u'Ethernet11': {} + }, + u'VLAN_MEMBER': { + u'Vlan100|Ethernet8': { + u'tagging_mode': u'untagged' + }, + u'Vlan100|Ethernet11': { + u'tagging_mode': u'untagged' + } + }, + u'PORT': { + 'Ethernet8': { + 'speed': '50000', + 'lanes': '73,74' + }, + 'Ethernet10': { + 'speed': '25000', + 'lanes': '75' + }, + 'Ethernet11': { + 'speed': '25000', + 'lanes': '76' + } + } + } + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + return + + def dpb_port8_4x25G_1x100G_f(self, curConfig): + ''' + Breakout Port 8 4x25G->1x100G with -f + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. 
+ ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='4x25G', newMode='1x100G') + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, + force=False, loadDefConfig=False) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'PORT': { + u'Ethernet8': None, + u'Ethernet9': None, + u'Ethernet10': None, + u'Ethernet11': None + } + } + addConfig = pJson + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + return + + def dpb_port8_1x100G_4x25G(self, curConfig): + ''' + Breakout Port 8 1x100G->4x25G + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. + ''' + cmdpb = self.config_mgmt_dpb(curConfig) + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='1x100G', newMode='4x25G') + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, + force=False, loadDefConfig=False) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'PORT': { + u'Ethernet8': None + } + } + addConfig = pJson + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + return + + def dpb_port8_2x50G_1x100G_f(self, curConfig): + ''' + Breakout Port 8 2x50G->1x100G with -f + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. 
+ ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='2x50G', newMode='1x100G') + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, + force=True, loadDefConfig=False) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet4'] + } + }, + u'VLAN_MEMBER': { + u'Vlan100|Ethernet8': None + }, + u'PORT': { + u'Ethernet8': None, + u'Ethernet10': None + } + } + addConfig = pJson + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + + def dpb_port8_2x50G_1x100G(self, curConfig): + ''' + Breakout Port 8 2x50G->1x100G + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. + ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='2x50G', newMode='1x100G') + deps, ret = cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, + force=False, loadDefConfig=False) + # Expected Result + assert ret == False and len(deps) == 3 + assert cmdpb.writeConfigDB.call_count == 0 + return + + def dpb_port8_4x25G_2x50G_f_l(self, curConfig): + ''' + Breakout Port 8 4x25G->2x50G with -f -l + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. 
+ ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=8, laneIdx=73, \ + curMode='4x25G', newMode='2x50G') + cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \ + loadDefConfig=True) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet4'] + }, + u'NO-NSW-PACL-TEST': { + u'ports': None + } + }, + u'INTERFACE': None, + u'VLAN_MEMBER': { + u'Vlan100|Ethernet8': None, + u'Vlan100|Ethernet11': None + }, + u'PORT': { + u'Ethernet8': None, + u'Ethernet9': None, + u'Ethernet10': None, + u'Ethernet11': None + } + } + addConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet4', 'Ethernet8', 'Ethernet10'] + } + }, + u'VLAN_MEMBER': { + u'Vlan100|Ethernet8': { + u'tagging_mode': u'untagged' + } + }, + u'PORT': { + 'Ethernet8': { + 'speed': '50000', + 'lanes': '73,74' + }, + 'Ethernet10': { + 'speed': '50000', + 'lanes': '75,76' + } + } + } + assert cmdpb.writeConfigDB.call_count == 2 + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + return + + def dpb_port4_4x25G_2x50G_f_l(self, curConfig): + ''' + Breakout Port 4 4x25G->2x50G with -f -l + + Parameters: + curConfig (dict): current Config in config DB. + + Return: + void + assert for success and failure. 
+ ''' + cmdpb = self.config_mgmt_dpb(curConfig) + # create ARGS + dPorts, pJson = self.generate_args(portIdx=4, laneIdx=69, \ + curMode='4x25G', newMode='2x50G') + cmdpb.breakOutPort(delPorts=dPorts, portJson=pJson, force=True, \ + loadDefConfig=True) + # Expected Result delConfig and addConfig is pushed in order + delConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet8', 'Ethernet10'] + } + }, + u'PORT': { + u'Ethernet4': None, + u'Ethernet5': None, + u'Ethernet6': None, + u'Ethernet7': None + } + } + addConfig = { + u'ACL_TABLE': { + u'NO-NSW-PACL-V4': { + u'ports': ['Ethernet0', 'Ethernet8', 'Ethernet10', 'Ethernet4'] + } + }, + u'PORT': { + 'Ethernet4': { + 'speed': '50000', + 'lanes': '69,70' + }, + 'Ethernet6': { + 'speed': '50000', + 'lanes': '71,72' + } + } + } + self.checkResult(cmdpb, delConfig, addConfig) + self.postUpdateConfig(curConfig, delConfig, addConfig) + return + +###########GLOBAL Configs##################################### +configDbJson = { + "ACL_TABLE": { + "NO-NSW-PACL-TEST": { + "policy_desc": "NO-NSW-PACL-TEST", + "type": "L3", + "stage": "INGRESS", + "ports": [ + "Ethernet9", + "Ethernet11", + ] + }, + "NO-NSW-PACL-V4": { + "policy_desc": "NO-NSW-PACL-V4", + "type": "L3", + "stage": "INGRESS", + "ports": [ + "Ethernet0", + "Ethernet4", + "Ethernet8", + "Ethernet10" + ] + } + }, + "VLAN": { + "Vlan100": { + "admin_status": "up", + "description": "server_vlan", + "dhcp_servers": [ + "10.186.72.116" + ] + }, + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet0": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet2": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet11": { + "tagging_mode": "untagged" + }, + }, + "INTERFACE": { + "Ethernet10": {}, + "Ethernet10|2a04:0000:40:a709::1/126": { + "scope": "global", + "family": "IPv6" + } + }, + "PORT": { + "Ethernet0": { + "alias": "Eth1/1", + "lanes": "65", + "description": "", + "speed": "25000", + 
"admin_status": "up" + }, + "Ethernet1": { + "alias": "Eth1/2", + "lanes": "66", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet2": { + "alias": "Eth1/3", + "lanes": "67", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet3": { + "alias": "Eth1/4", + "lanes": "68", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet4": { + "alias": "Eth2/1", + "lanes": "69", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet5": { + "alias": "Eth2/2", + "lanes": "70", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet6": { + "alias": "Eth2/3", + "lanes": "71", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet7": { + "alias": "Eth2/4", + "lanes": "72", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet8": { + "alias": "Eth3/1", + "lanes": "73", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet9": { + "alias": "Eth3/2", + "lanes": "74", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet10": { + "alias": "Eth3/3", + "lanes": "75", + "description": "", + "speed": "25000", + "admin_status": "up" + }, + "Ethernet11": { + "alias": "Eth3/4", + "lanes": "76", + "description": "", + "speed": "25000", + "admin_status": "up" + } + } +} + +portBreakOutConfigDbJson = { + "ACL_TABLE": { + "NO-NSW-PACL-TEST": { + "ports": [ + "Ethernet9", + "Ethernet11", + ] + }, + "NO-NSW-PACL-V4": { + "policy_desc": "NO-NSW-PACL-V4", + "ports": [ + "Ethernet0", + "Ethernet4", + "Ethernet8", + "Ethernet10" + ] + } + }, + "VLAN": { + "Vlan100": { + "admin_status": "up", + "description": "server_vlan", + "dhcp_servers": [ + "10.186.72.116" + ] + } + }, + "VLAN_MEMBER": { + "Vlan100|Ethernet8": { + "tagging_mode": "untagged" + }, + "Vlan100|Ethernet11": { + "tagging_mode": "untagged" + } + }, + "INTERFACE": { + "Ethernet11": {}, + 
"Ethernet11|2a04:1111:40:a709::1/126": { + "scope": "global", + "family": "IPv6" + } + } +} diff --git a/sonic-utilities-tests/mock_tables/counters_db.json b/sonic-utilities-tests/mock_tables/counters_db.json index 2476837d71..2b2b600280 100644 --- a/sonic-utilities-tests/mock_tables/counters_db.json +++ b/sonic-utilities-tests/mock_tables/counters_db.json @@ -145,6 +145,12 @@ "Ethernet4": "oid:0x1000000000004", "Ethernet8": "oid:0x1000000000006" }, + "COUNTERS_LAG_NAME_MAP": { + "PortChannel0001": "oid:0x60000000005a1", + "PortChannel0002": "oid:0x60000000005a2", + "PortChannel0003": "oid:0x600000000063c", + "PortChannel0004": "oid:0x600000000063d" + }, "COUNTERS_DEBUG_NAME_PORT_STAT_MAP": { "DEBUG_0": "SAI_PORT_STAT_IN_DROP_REASON_RANGE_BASE", "DEBUG_2": "SAI_PORT_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" From 4e2a53e4899b41f15c84df0ebed362cef4a32b79 Mon Sep 17 00:00:00 2001 From: Sujin Kang Date: Wed, 8 Jul 2020 11:24:47 -0700 Subject: [PATCH 105/111] Add watchdogutil to control the hw watchdog (#945) * Add watchdogutil to control the hw watchdog * fix LGTM * Fixed based on review comments * replace the is_armed() and get_remaining_time to status() subcommand * syntax error * Add more info to the output * re-format of output * remove spaces * change the version number of watchdogutil * Change the output parsing for the watchdog arm case * typo * fix more review comments --- setup.py | 2 + watchdogutil/__init__.py | 0 watchdogutil/main.py | 137 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 139 insertions(+) create mode 100644 watchdogutil/__init__.py create mode 100644 watchdogutil/main.py diff --git a/setup.py b/setup.py index 8bccb843f1..38090071b0 100644 --- a/setup.py +++ b/setup.py @@ -53,6 +53,7 @@ 'sonic-utilities-tests', 'undebug', 'utilities_common', + 'watchdogutil', ], package_data={ 'show': ['aliases.ini'], @@ -136,6 +137,7 @@ 'sonic-clear = clear.main:cli', 'sonic_installer = sonic_installer.main:cli', 'undebug = undebug.main:cli', 
+ 'watchdogutil = watchdogutil.main:watchdogutil', ] }, # NOTE: sonic-utilities also depends on other packages that are either only diff --git a/watchdogutil/__init__.py b/watchdogutil/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/watchdogutil/main.py b/watchdogutil/main.py new file mode 100644 index 0000000000..df0f72c780 --- /dev/null +++ b/watchdogutil/main.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python +# +# main.py +# +# Command-line utility for interacting with HW Watchdog in SONiC +# + +try: + import sys + import os + import click + import syslog + import sonic_platform +except ImportError as e: + raise ImportError("%s - required module not found" % str(e)) + +VERSION = "1.0" + +SYSLOG_IDENTIFIER = "watchdogutil" + +WATCHDOG_LOAD_ERROR = -1 +CHASSIS_LOAD_ERROR = -2 + +# Global platform-specific watchdog class instance +platform_watchdog = None + + +# ========================== Syslog wrappers ========================== + + +def log_info(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_INFO, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + + +def log_warning(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_WARNING, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + + +def log_error(msg, also_print_to_console=False): + syslog.openlog(SYSLOG_IDENTIFIER) + syslog.syslog(syslog.LOG_ERR, msg) + syslog.closelog() + + if also_print_to_console: + click.echo(msg) + + +# ==================== Methods for initialization ==================== + +# Loads platform specific watchdog module from source +def load_platform_watchdog(): + global platform_watchdog + + platform = sonic_platform.platform.Platform() + + chassis = platform.get_chassis() + if not chassis: + log_error("Failed to get chassis") + return CHASSIS_LOAD_ERROR + + platform_watchdog = chassis.get_watchdog() + if not platform_watchdog: + 
log_error("Failed to get watchdog module") + return WATCHDOG_LOAD_ERROR + + return 0 + + +# ==================== CLI commands and groups ==================== + + +# This is our main entrypoint - the main 'watchdogutil' command +@click.group() +def watchdogutil(): + """watchdogutil - Command line utility for providing HW watchdog interface""" + + if os.geteuid() != 0: + click.echo("Root privileges are required for this operation") + sys.exit(1) + + # Load platform-specific watchdog class + err = load_platform_watchdog() + if err != 0: + sys.exit(2) + +# 'version' subcommand +@watchdogutil.command() +def version(): + """Display version info""" + click.echo("watchdogutil version {0}".format(VERSION)) + +# 'status' subcommand +@watchdogutil.command() +def status(): + """Check the watchdog status with remaining_time if it's armed""" + status = platform_watchdog.is_armed() + remaining_time = platform_watchdog.get_remaining_time() + if status is True: + click.echo("Status: Armed") + click.echo("Time remaining: {} seconds".format(remaining_time)) + else: + click.echo("Status: Unarmed") + + +# 'disarm' subcommand +@watchdogutil.command() +def disarm(): + """Disarm HW watchdog""" + result = platform_watchdog.disarm() + if result is True: + click.echo("Watchdog disarmed successfully") + else: + click.echo("Failed to disarm Watchdog") + +# 'arm' subcommand +@watchdogutil.command() +@click.option('-s', '--seconds', default=180, type=int, help="the default timeout of HW watchdog") +def arm(seconds): + """Arm HW watchdog""" + result = int(platform_watchdog.arm(seconds)) + if result < 0: + click.echo("Failed to arm Watchdog for {} seconds".format(seconds)) + else: + click.echo("Watchdog armed for {} seconds".format(result)) + +if __name__ == '__main__': + watchdogutil() From 569ab64d9d8f172a36be810a24ed3f2d16419bf2 Mon Sep 17 00:00:00 2001 From: Akhilesh Samineni <47657796+AkhileshSamineni@users.noreply.github.com> Date: Thu, 9 Jul 2020 03:00:23 +0530 Subject: [PATCH 106/111] 
Separated the notifications for "sonic-clear nat translations" and "sonic-clear nat statistics" command (#892) Signed-off-by: Akhilesh Samineni --- scripts/natclear | 15 +++++++++++---- scripts/natshow | 10 ++++++++-- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/scripts/natclear b/scripts/natclear index 7883c8fd65..1a22f7f100 100644 --- a/scripts/natclear +++ b/scripts/natclear @@ -23,12 +23,19 @@ class NatClear(object): self.db.connect(self.db.APPL_DB) return - def send_notification(self, op, data): + def send_entries_notification(self, op, data): opdata = [op,data] msg = json.dumps(opdata,separators=(',',':')) - self.db.publish('APPL_DB','FLUSHNATREQUEST', msg) + self.db.publish('APPL_DB','FLUSHNATENTRIES', msg) return + def send_statistics_notification(self, op, data): + opdata = [op,data] + msg = json.dumps(opdata,separators=(',',':')) + self.db.publish('APPL_DB','FLUSHNATSTATISTICS', msg) + return + + def main(): parser = argparse.ArgumentParser(description='Clear the nat information', formatter_class=argparse.RawTextHelpFormatter, @@ -49,11 +56,11 @@ def main(): try: nat = NatClear() if clear_translations: - nat.send_notification("ENTRIES", "ALL") + nat.send_entries_notification("ENTRIES", "ALL") print "" print("Dynamic NAT entries are cleared.") elif clear_statistics: - nat.send_notification("STATISTICS", "ALL") + nat.send_statistics_notification("STATISTICS", "ALL") print "" print("NAT statistics are cleared.") except Exception as e: diff --git a/scripts/natshow b/scripts/natshow index ed99b001e7..02945a524b 100644 --- a/scripts/natshow +++ b/scripts/natshow @@ -132,6 +132,8 @@ class NatShow(object): continue ip_protocol = "all" + source = "---" + destination = "---" translated_dst = "---" translated_src = "---" @@ -249,7 +251,7 @@ class NatShow(object): napt_keys = re.split(':', napt_entry) napt_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TABLE:{}'.format(napt_entry)) - ip_protocol = napt_keys[0] + ip_protocol = 
napt_keys[0].lower() source = "---" destination = "---" @@ -278,6 +280,8 @@ class NatShow(object): nat_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAT_TWICE_TABLE:{}'.format(nat_twice_entry)) ip_protocol = "all" + source = "---" + destination = "---" source = nat_twice_keys[0] destination = nat_twice_keys[1] @@ -301,7 +305,9 @@ class NatShow(object): napt_twice_keys = re.split(':', napt_twice_entry) napt_twice_values = self.appl_db.get_all(self.appl_db.APPL_DB,'NAPT_TWICE_TABLE:{}'.format(napt_twice_entry)) - ip_protocol = napt_twice_keys[0] + ip_protocol = napt_twice_keys[0].lower() + source = "---" + destination = "---" source = napt_twice_keys[1] + ':' + napt_twice_keys[2] destination = napt_twice_keys[3] + ':' + napt_twice_keys[4] From b51a7e89bb294798459b1fbabb7b415ae3d9276a Mon Sep 17 00:00:00 2001 From: judyjoseph <53951155+judyjoseph@users.noreply.github.com> Date: Thu, 9 Jul 2020 13:54:26 -0700 Subject: [PATCH 107/111] [Multi NPU] Time Improvements to the config reload/load_minigraph commands (#917) * Improvements to the config reload/load_minigraph commands in Multi NPU platforms by parallelizing with threads. --- config/main.py | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/config/main.py b/config/main.py index bd37962005..e639e4106f 100755 --- a/config/main.py +++ b/config/main.py @@ -9,6 +9,7 @@ import syslog import time import netifaces +import threading import sonic_device_util import ipaddress @@ -121,6 +122,16 @@ def get_command(self, ctx, cmd_name): # Helper functions # +# Execute action per NPU instance for multi instance services. 
+def execute_systemctl_per_asic_instance(inst, event, service, action): + try: + click.echo("Executing {} of service {}@{}...".format(action, service, inst)) + run_command("systemctl {} {}@{}.service".format(action, service, inst)) + except SystemExit as e: + log_error("Failed to execute {} of service {}@{} with error {}".format(action, service, inst, e)) + # Set the event object if there is a failure and exception was raised. + event.set() + # Execute action on list of systemd services def execute_systemctl(list_of_services, action): num_asic = sonic_device_util.get_num_npus() @@ -138,14 +149,27 @@ def execute_systemctl(list_of_services, action): except SystemExit as e: log_error("Failed to execute {} of service {} with error {}".format(action, service, e)) raise + if (service + '.service' in generated_multi_instance_services): - for inst in range(num_asic): - try: - click.echo("Executing {} of service {}@{}...".format(action, service, inst)) - run_command("systemctl {} {}@{}.service".format(action, service, inst)) - except SystemExit as e: - log_error("Failed to execute {} of service {}@{} with error {}".format(action, service, inst, e)) - raise + # With Multi NPU, Start a thread per instance to do the "action" on multi instance services. + if sonic_device_util.is_multi_npu(): + threads = [] + # Use this event object to co-ordinate if any threads raised exception + e = threading.Event() + + kwargs = {'service': service, 'action': action} + for inst in range(num_asic): + t = threading.Thread(target=execute_systemctl_per_asic_instance, args=(inst, e), kwargs=kwargs) + threads.append(t) + t.start() + + # Wait for all the threads to finish. + for inst in range(num_asic): + threads[inst].join() + + # Check if any of the threads have raised exception, if so exit the process. 
+ if e.is_set(): + sys.exit(1) def run_command(command, display_cmd=False, ignore_error=False): """Run bash command and print output to stdout From 9f2404320451cf6fbdc6c58b2d447eaf9da2fbef Mon Sep 17 00:00:00 2001 From: Sujin Kang Date: Thu, 9 Jul 2020 17:17:48 -0700 Subject: [PATCH 108/111] Enable HW watchdog before fast-reboot (#977) --- scripts/fast-reboot | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 0d874e7f14..644955533b 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -13,6 +13,7 @@ STRICT=no REBOOT_METHOD="/sbin/kexec -e" ASSISTANT_IP_LIST="" ASSISTANT_SCRIPT="/usr/bin/neighbor_advertiser" +WATCHDOG_UTIL="/usr/bin/watchdogutil" DEVPATH="/usr/share/sonic/device" PLATFORM=$(sonic-cfggen -H -v DEVICE_METADATA.localhost.platform) PLATFORM_PLUGIN="${REBOOT_TYPE}_plugin" @@ -609,6 +610,12 @@ if [ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_PLUGIN} ]; then ${DEVPATH}/${PLATFORM}/${PLATFORM_PLUGIN} fi +# Enable Watchdog Timer +if [ -x ${WATCHDOG_UTIL} ]; then + debug "Enabling Watchdog before ${REBOOT_TYPE}" + ${WATCHDOG_UTIL} arm +fi + # Reboot: explicity call Linux native reboot under sbin debug "Rebooting with ${REBOOT_METHOD} to ${NEXT_SONIC_IMAGE} ..." 
exec ${REBOOT_METHOD} From 6961a91c32af96c48c641a57283b04def4fbd78a Mon Sep 17 00:00:00 2001 From: Prince Sunny Date: Fri, 10 Jul 2020 19:30:58 -0700 Subject: [PATCH 109/111] Intf table migration for APP_DB entries during warmboot (#980) --- scripts/db_migrator.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py index 4ea89e0bff..c6c5f90945 100755 --- a/scripts/db_migrator.py +++ b/scripts/db_migrator.py @@ -5,6 +5,7 @@ import argparse import syslog from swsssdk import ConfigDBConnector, SonicDBConfig +from swsssdk import SonicV2Connector import sonic_device_util @@ -52,6 +53,10 @@ def __init__(self, namespace, socket=None): self.configDB = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace, **db_kwargs) self.configDB.db_connect('CONFIG_DB') + self.appDB = SonicV2Connector(host='127.0.0.1') + if self.appDB is not None: + self.appDB.connect(self.appDB.APPL_DB) + def migrate_pfc_wd_table(self): ''' Migrate all data entries from table PFC_WD_TABLE to PFC_WD @@ -99,6 +104,41 @@ def migrate_interface_table(self): self.configDB.set_entry(table, key[0], data[key]) if_db.append(key[0]) + def migrate_intf_table(self): + ''' + Migrate all data from existing INTF table in APP DB during warmboot with IP Prefix + to have an additional ONE entry without IP Prefix. For. e.g, for an entry + "Vlan1000:192.168.0.1/21": {}", this function shall add an entry without + IP prefix as ""Vlan1000": {}". 
This also migrates 'lo' to 'Loopback0' interface + ''' + + if self.appDB is None: + return + + if_db = [] + data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*") + for key in data: + if_name = key.split(":")[1] + if if_name == "lo": + self.appDB.delete(self.appDB.APPL_DB, key) + key = key.replace(if_name, "Loopback0") + log_info('Migrating lo entry to ' + key) + self.appDB.set(self.appDB.APPL_DB, key, 'NULL', 'NULL') + + if '/' not in key: + if_db.append(key.split(":")[1]) + continue + + data = self.appDB.keys(self.appDB.APPL_DB, "INTF_TABLE:*") + for key in data: + if_name = key.split(":")[1] + if if_name in if_db: + continue + log_info('Migrating intf table for ' + if_name) + table = "INTF_TABLE:" + if_name + self.appDB.set(self.appDB.APPL_DB, table, 'NULL', 'NULL') + if_db.append(if_name) + def mlnx_migrate_buffer_pool_size(self): """ On Mellanox platform the buffer pool size changed since @@ -213,6 +253,7 @@ def version_unknown(self): # upgrade will take care of the subsequent migrations. self.migrate_pfc_wd_table() self.migrate_interface_table() + self.migrate_intf_table() self.set_version('version_1_0_2') return 'version_1_0_2' @@ -223,6 +264,7 @@ def version_1_0_1(self): log_info('Handling version_1_0_1') self.migrate_interface_table() + self.migrate_intf_table() self.set_version('version_1_0_2') return 'version_1_0_2' From 9d20212e319e5bb8ef5d276c98d4b0d7f6b35d82 Mon Sep 17 00:00:00 2001 From: Tamer Ahmed Date: Sat, 11 Jul 2020 16:31:21 -0700 Subject: [PATCH 110/111] [filter-fdb] Fix For Vlan Defined With No CIDR (#976) VLAN_INTERFACE section in Config_db may contain entries with no CIDR. This fix skip those entries. 
signed-off-by: Tamer Ahmed --- scripts/filter_fdb_entries.py | 2 ++ sonic-utilities-tests/filter_fdb_input/test_vectors.py | 1 + 2 files changed, 3 insertions(+) diff --git a/scripts/filter_fdb_entries.py b/scripts/filter_fdb_entries.py index d7f93d3e1e..31d4204ec9 100755 --- a/scripts/filter_fdb_entries.py +++ b/scripts/filter_fdb_entries.py @@ -31,6 +31,8 @@ def get_vlan_cidr_map(filename): vlan_cidr = defaultdict() if "VLAN_INTERFACE" in config_db_entries.keys() and "VLAN" in config_db_entries.keys(): for vlan_key in config_db_entries["VLAN_INTERFACE"].keys(): + if '|' not in vlan_key: + continue vlan, cidr = tuple(vlan_key.split('|')) if vlan in config_db_entries["VLAN"]: vlan_cidr[vlan] = ip_interface(cidr).network diff --git a/sonic-utilities-tests/filter_fdb_input/test_vectors.py b/sonic-utilities-tests/filter_fdb_input/test_vectors.py index cd1592a0a4..2321da47af 100644 --- a/sonic-utilities-tests/filter_fdb_input/test_vectors.py +++ b/sonic-utilities-tests/filter_fdb_input/test_vectors.py @@ -198,6 +198,7 @@ "Vlan1000": {} }, "VLAN_INTERFACE": { + "Vlan1000": {}, "Vlan1000|192.168.128.1/21": {} }, }, From 1f61b8acf370f2639c612ffa835dc60dbbd0b825 Mon Sep 17 00:00:00 2001 From: Abhishek Dosi Date: Mon, 3 Aug 2020 17:05:09 -0700 Subject: [PATCH 111/111] Fixed show interface status to get Interface to Port-Channel mapping information from APP DB instead of Config DB. Reason was when we add/remove memeber from Port-Channel using command teamdctl (eg: teamdctl PortChannel0002 port remove Ethernet0) show interface status does not get updated and provide worng information. 
Signed-off-by: Abhishek Dosi --- scripts/intfutil | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/scripts/intfutil b/scripts/intfutil index f51c945936..ea1c0bcf3d 100755 --- a/scripts/intfutil +++ b/scripts/intfutil @@ -195,9 +195,9 @@ def tuple_to_dict(tup, new_dict): return new_dict -def get_raw_portchannel_info(config_db): +def get_raw_portchannel_info(aapl_db): """ - This function uses the redis config_db as input and gets the "PORTCHANNEL_MEMBER" table + This function uses the redis aapl_db as input and gets the "LAG_MEMBER_TABLE" table create >>> get_po_int_configdb_info = get_portchannel_info(config_db) >>> pprint(get_po_int_configdb_info) @@ -208,26 +208,30 @@ def get_raw_portchannel_info(config_db): ('PortChannel0004', 'Ethernet124'): {}} This function returns a dictionary with the key being portchannels and interface tuple. """ - get_raw_po_int_configdb_info = config_db.get_table('PORTCHANNEL_MEMBER') - return get_raw_po_int_configdb_info # Return a dictionary with the key being the portchannel and interface + get_raw_po_int_appdb_info = {} + team_keys = aapl_db.keys(aapl_db.APPL_DB, "LAG_MEMBER_TABLE:"+"*") + for key in team_keys: + team_info = key.split(":") + get_raw_po_int_appdb_info[(team_info[1], team_info[2])] = {} + return get_raw_po_int_appdb_info # Return a dictionary with the key being the portchannel and interface -def get_portchannel_list(get_raw_po_int_configdb_info): +def get_portchannel_list(get_raw_po_int_appdb_info): """ - >>> portchannel_list = get_portchannel_list(get_raw_po_int_configdb_info) + >>> portchannel_list = get_portchannel_list(get_raw_po_int_appdb_info) >>> pprint(portchannel_list) ['PortChannel0001', 'PortChannel0002', 'PortChannel0003', 'PortChannel0004'] >>> """ portchannel_list = [] - for po in get_raw_po_int_configdb_info: + for po in get_raw_po_int_appdb_info: portchannel = po[0] if portchannel not in portchannel_list: portchannel_list.append(portchannel) return 
natsorted(portchannel_list) -def create_po_int_tuple_list(get_raw_po_int_configdb_info): +def create_po_int_tuple_list(get_raw_po_int_appdb_info): """ - >>> po_int_tuple = get_raw_po_int_configdb_info.keys() + >>> po_int_tuple = get_raw_po_int_appdb_info.keys() >>> pprint(po_int_tuple_list) [('PortChannel0001', 'Ethernet108'), ('PortChannel0002', 'Ethernet116'), @@ -236,7 +240,7 @@ def create_po_int_tuple_list(get_raw_po_int_configdb_info): ('PortChannel0001', 'Ethernet112')] >>> """ - po_int_tuple_list = get_raw_po_int_configdb_info.keys() + po_int_tuple_list = get_raw_po_int_appdb_info.keys() return po_int_tuple_list def create_po_int_dict(po_int_tuple_list): @@ -442,9 +446,9 @@ class IntfStatus(object): self.front_panel_ports_list = get_frontpanel_port_list(self.config_db) appl_db_keys = appl_db_keys_get(self.appl_db, self.front_panel_ports_list, None) self.int_to_vlan_dict = get_interface_vlan_dict(self.config_db) - self.get_raw_po_int_configdb_info = get_raw_portchannel_info(self.config_db) - self.portchannel_list = get_portchannel_list(self.get_raw_po_int_configdb_info) - self.po_int_tuple_list = create_po_int_tuple_list(self.get_raw_po_int_configdb_info) + self.get_raw_po_int_appdb_info = get_raw_portchannel_info(self.appl_db) + self.portchannel_list = get_portchannel_list(self.get_raw_po_int_appdb_info) + self.po_int_tuple_list = create_po_int_tuple_list(self.get_raw_po_int_appdb_info) self.po_int_dict = create_po_int_dict(self.po_int_tuple_list) self.int_po_dict = create_int_to_portchannel_dict(self.po_int_tuple_list) self.combined_int_to_vlan_po_dict = merge_dicts(self.int_to_vlan_dict, self.int_po_dict)