From 1e2e27b5c85087b5622101486d3f22ac34b5ee39 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Wed, 5 Nov 2025 16:23:35 +0800 Subject: [PATCH 1/2] Compatibility fixes for docker-sonic-mgmt based on Ubuntu 24.04 (#21045) What is the motivation for this PR? This PR is to continue the effort made by @yutongzhang-microsoft in #18339 The current docker-sonic-mgmt image is based on Ubuntu 20.04 which is end of support now. PR sonic-net/sonic-buildimage#24306 upgraded the base image of docker-sonic-mgmt to Ubuntu 24.04. Together, versions of most packages are upgraded too. The upgrade introduced lots of compatibility issues. This PR is to fix all the compatibility issues. All the fixes are backward compatible. The code change works with both current and new docker-sonic-mgmt. How did you do it? Fix the snmp code caused by pysnmp upgrade. Fix json dump of ansible result caused by pytest-ansible upgrade. How did you verify/test it? Take advantage of the current sonic-mgmt PR testing. Verified that the code change works with new docker-sonic-mgmt in #20851 Verified that the code change works with the current docker-sonic-mgmt in this PR. 
--- .azure-pipelines/pytest-collect-only.yml | 8 +- ansible/devutil/devices/ansible_hosts.py | 8 + ansible/library/lldp_facts.py | 234 +++- ansible/library/snmp_facts.py | 1023 ++++++++++++++++-- ansible/roles/vm_set/tasks/add_ceos_list.yml | 2 +- ansible/roles/vm_set/tasks/main.yml | 1 + ansible/roles/vm_set/tasks/stop_sonic_vm.yml | 2 +- tests/common/devices/base.py | 5 +- tests/common/plugins/ansible_fixtures.py | 8 + tests/conftest.py | 27 - tests/ptf_runner.py | 6 +- tests/test_nbr_health.py | 2 +- 12 files changed, 1162 insertions(+), 164 deletions(-) diff --git a/.azure-pipelines/pytest-collect-only.yml b/.azure-pipelines/pytest-collect-only.yml index 4f279f13e0c..5750405ecff 100644 --- a/.azure-pipelines/pytest-collect-only.yml +++ b/.azure-pipelines/pytest-collect-only.yml @@ -1,8 +1,8 @@ steps: - script: | - sudo apt-get update - sudo apt-get install \ + sudo apt-get -o DPkg::Lock::Timeout=180 update + sudo apt-get -o DPkg::Lock::Timeout=180 install \ ca-certificates \ curl \ gnupg \ @@ -12,8 +12,8 @@ steps: echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] \ https://download.docker.com/linux/ubuntu \ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - sudo apt-get update - sudo apt-get install docker-ce docker-ce-cli containerd.io -y + sudo apt-get -o DPkg::Lock::Timeout=180 update + sudo apt-get -o DPkg::Lock::Timeout=180 install docker-ce docker-ce-cli containerd.io -y displayName: 'Install Docker' - checkout: self diff --git a/ansible/devutil/devices/ansible_hosts.py b/ansible/devutil/devices/ansible_hosts.py index 2cec9217967..26fdf69758a 100644 --- a/ansible/devutil/devices/ansible_hosts.py +++ b/ansible/devutil/devices/ansible_hosts.py @@ -53,6 +53,14 @@ except Exception as e: logging.error("Hack for https://github.com/ansible/pytest-ansible/issues/47 failed: {}".format(repr(e))) +try: + # Initialize ansible plugin loader to avoid issues with 
ansbile-core 2.18 + from ansible.plugins.loader import init_plugin_loader + init_plugin_loader() +except ImportError: + # Nothing need to do for ansible-core 2.13 + pass + class UnsupportedAnsibleModule(Exception): pass diff --git a/ansible/library/lldp_facts.py b/ansible/library/lldp_facts.py index 339b4335b0e..04896e69569 100644 --- a/ansible/library/lldp_facts.py +++ b/ansible/library/lldp_facts.py @@ -3,11 +3,22 @@ import json from collections import defaultdict from ansible.module_utils.basic import AnsibleModule -try: + +import asyncio +import pysnmp + +if pysnmp.version[0] < 5: from pysnmp.entity.rfc3413.oneliner import cmdgen - has_pysnmp = True -except Exception: - has_pysnmp = False +else: + from pysnmp.hlapi.v3arch.asyncio import ( + cmdgen, + UdpTransportTarget, + walk_cmd, + SnmpEngine, + ContextData, + ObjectType, + ObjectIdentity + ) DOCUMENTATION = ''' --- @@ -90,11 +101,12 @@ def __init__(self, dotprefix=False): else: dp = "" - # From IF-MIB + # From IF-MIB, refer to https://mibs.observium.org/mib/IF-MIB/ # ifdescr is common support, replace the lldpportid self.if_descr = dp + ".3.6.1.2.1.2.2.1.2" # From LLDP-MIB + self.lldp_rem_entry = dp + ".0.8802.1.1.2.1.4.1.1" # for snmp_walk self.lldp_rem_port_id = dp + ".0.8802.1.1.2.1.4.1.1.7" self.lldp_rem_port_desc = dp + ".0.8802.1.1.2.1.4.1.1.8" self.lldp_rem_sys_desc = dp + ".0.8802.1.1.2.1.4.1.1.10" @@ -129,27 +141,12 @@ def get_iftable(snmp_data): return (if_table, inverse_if_table) -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True), - version=dict(required=True, choices=['v2', 'v2c', 'v3']), - community=dict(required=False, default=False), - username=dict(required=False), - level=dict(required=False, choices=['authNoPriv', 'authPriv']), - integrity=dict(required=False, choices=['md5', 'sha']), - privacy=dict(required=False, choices=['des', 'aes']), - authkey=dict(required=False), - privkey=dict(required=False), - removeplaceholder=dict(required=False)), - 
required_together=(['username', 'level', 'integrity', 'authkey'], [ - 'privacy', 'privkey'],), - supports_check_mode=False) +def Tree(): + return defaultdict(Tree) - m_args = module.params - if not has_pysnmp: - module.fail_json(msg='Missing required pysnmp module (check docs)') +def main_legacy(module): + m_args = module.params cmd_gen = cmdgen.CommandGenerator() @@ -196,8 +193,6 @@ def main(): # Use v without a prefix to use with return values v = DefineOid(dotprefix=False) - def Tree(): return defaultdict(Tree) - results = Tree() host = m_args['host'] @@ -278,4 +273,189 @@ def Tree(): return defaultdict(Tree) module.exit_json(ansible_facts=results) -main() +class LLDPFactsCollector: + + def __init__(self, module): + self.module = module + self.m_args = module.params + self.results = Tree() + self.if_table = dict() + self.inverse_if_table = dict() + self.snmp_engine = SnmpEngine() + self.context = ContextData() + self.transport = None + self._init_auth() + self.p = DefineOid(dotprefix=True) + self.v = DefineOid(dotprefix=False) + + def _init_auth(self): + # Verify that we receive a community when using snmp v2 + if self.m_args['version'] == "v2" or self.m_args['version'] == "v2c": + if self.m_args['community'] is False: + self.module.fail_json( + msg='Community not set when using snmp version 2' + ) + + if self.m_args['version'] == "v3": + if self.m_args['username'] is None: + self.module.fail_json( + msg='Username not set when using snmp version 3' + ) + + if self.m_args['level'] == "authPriv" and self.m_args['privacy'] is None: + self.module.fail_json( + msg='Privacy algorithm not set when using authPriv' + ) + + if self.m_args['integrity'] == "sha": + integrity_proto = cmdgen.usmHMACSHAAuthProtocol + elif self.m_args['integrity'] == "md5": + integrity_proto = cmdgen.usmHMACMD5AuthProtocol + + if self.m_args['privacy'] == "aes": + privacy_proto = cmdgen.usmAesCfb128Protocol + elif self.m_args['privacy'] == "des": + privacy_proto = cmdgen.usmDESPrivProtocol + + 
# Use SNMP Version 2 + if self.m_args['version'] == "v2" or self.m_args['version'] == "v2c": + self.snmp_auth = cmdgen.CommunityData(self.m_args['community']) + + # Use SNMP Version 3 with authNoPriv + elif self.m_args['level'] == "authNoPriv": + self.snmp_auth = cmdgen.UsmUserData( + self.m_args['username'], + authKey=self.m_args['authkey'], + authProtocol=integrity_proto + ) + # Use SNMP Version 3 with authPriv + else: + self.snmp_auth = cmdgen.UsmUserData( + self.m_args['username'], + authKey=self.m_args['authkey'], + privKey=self.m_args['privkey'], + authProtocol=integrity_proto, + privProtocol=privacy_proto + ) + + async def setup(self): + self.transport = await UdpTransportTarget.create( + (self.m_args['host'], 161), + timeout=self.m_args['timeout'] + ) + + async def collect(self): + if self.transport is None: + raise Exception('Transport not initialized. Call setup() first.') + + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.if_descr)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying if_descr." + ) + + for oid, val in varBinds: + ifIndex = str(oid).split(".")[-1] + ifDescr = str(val) + self.if_table[ifDescr] = ifIndex + self.inverse_if_table[ifIndex] = ifDescr + + lldp_rem_sys = dict() + lldp_rem_port_id = dict() + lldp_rem_port_desc = dict() + lldp_rem_chassis_id = dict() + lldp_rem_sys_desc = dict() + + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldp_rem_entry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldp_rem_entry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + ifIndex = str(current_oid).split(".")[-2] + + try: + if_name = self.inverse_if_table[ifIndex] + except Exception: + print( + json.dumps({"unbound_interface_index": ifIndex}) + ) + module.fail_json(msg="unboundinterface in inverse if table") + + if self.v.lldp_rem_sys_name in current_oid: + lldp_rem_sys[if_name] = current_val + elif self.v.lldp_rem_port_id in current_oid: + lldp_rem_port_id[if_name] = current_val + elif self.v.lldp_rem_port_desc in current_oid: + lldp_rem_port_desc[if_name] = current_val + elif self.v.lldp_rem_chassis_id in current_oid: + lldp_rem_chassis_id[if_name] = current_val + elif self.v.lldp_rem_sys_desc in current_oid: + lldp_rem_sys_desc[if_name] = current_val + + lldp_data = dict() + + for if_name in lldp_rem_sys: + lldp_data[if_name] = { + 'neighbor_sys_name': lldp_rem_sys[if_name], + 'neighbor_port_desc': lldp_rem_port_desc[if_name], + 'neighbor_port_id': lldp_rem_port_id[if_name], + 'neighbor_sys_desc': lldp_rem_sys_desc[if_name], + 'neighbor_chassis_id': lldp_rem_chassis_id[if_name] + } + + self.results['ansible_lldp_facts'] = lldp_data + + +async def main(module): + collector = LLDPFactsCollector(module) + await collector.setup() + await collector.collect() + module.exit_json(ansible_facts=collector.results) + + +if __name__ == '__main__': + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True), + timeout=dict(reqired=False, type='int', default=20), + version=dict(required=True, choices=['v2', 'v2c', 'v3']), + community=dict(required=False, default=False), + username=dict(required=False), + level=dict(required=False, choices=['authNoPriv', 'authPriv']), + integrity=dict(required=False, choices=['md5', 'sha']), + privacy=dict(required=False, choices=['des', 'aes']), + authkey=dict(required=False), + privkey=dict(required=False), + removeplaceholder=dict(required=False)), + required_together=( + ['username', 
'level', 'integrity', 'authkey'], + ['privacy', 'privkey'], + ), + supports_check_mode=False + ) + + if pysnmp.version[0] < 5: + main_legacy(module) + else: + asyncio.run(main(module)) diff --git a/ansible/library/snmp_facts.py b/ansible/library/snmp_facts.py index 141e11dc44b..906d32ce7f6 100644 --- a/ansible/library/snmp_facts.py +++ b/ansible/library/snmp_facts.py @@ -18,7 +18,32 @@ from collections import defaultdict from ansible.module_utils.basic import AnsibleModule -import six + +import logging +import datetime +from ansible.module_utils.debug_utils import config_module_logging + +import asyncio +import pysnmp +import ipaddress + +from pyasn1.type import univ +from pysnmp.proto import rfc1902 + +if pysnmp.version[0] < 5: + from pysnmp.entity.rfc3413.oneliner import cmdgen +else: + from pysnmp.hlapi.v3arch.asyncio import ( + cmdgen, + UdpTransportTarget, + get_cmd, + walk_cmd, + SnmpEngine, + ContextData, + ObjectType, + ObjectIdentity + ) + DOCUMENTATION = ''' --- module: snmp_facts @@ -99,15 +124,6 @@ ''' -try: - from pysnmp.proto import rfc1902 - from pysnmp.entity.rfc3413.oneliner import cmdgen - from pyasn1.type import univ - has_pysnmp = True -except Exception: - has_pysnmp = False - - class DefineOid(object): def __init__(self, dotprefix=False): @@ -116,7 +132,7 @@ def __init__(self, dotprefix=False): else: dp = "" - # From SNMPv2-MIB + # From SNMPv2-MIB, refer to https://mibs.observium.org/mib/SNMPv2-MIB/ self.sysDescr = dp + "1.3.6.1.2.1.1.1.0" self.sysObjectId = dp + "1.3.6.1.2.1.1.2.0" self.sysUpTime = dp + "1.3.6.1.2.1.1.3.0" @@ -124,7 +140,8 @@ def __init__(self, dotprefix=False): self.sysName = dp + "1.3.6.1.2.1.1.5.0" self.sysLocation = dp + "1.3.6.1.2.1.1.6.0" - # From IF-MIB + # From IF-MIB, refer to https://mibs.observium.org/mib/IF-MIB/ + self.ifEntry = dp + "1.3.6.1.2.1.2.2.1" # For walk_cmd self.ifIndex = dp + "1.3.6.1.2.1.2.2.1.1" self.ifDescr = dp + "1.3.6.1.2.1.2.2.1.2" self.ifType = dp + "1.3.6.1.2.1.2.2.1.3" @@ -133,19 +150,21 @@ 
def __init__(self, dotprefix=False): self.ifPhysAddress = dp + "1.3.6.1.2.1.2.2.1.6" self.ifAdminStatus = dp + "1.3.6.1.2.1.2.2.1.7" self.ifOperStatus = dp + "1.3.6.1.2.1.2.2.1.8" - self.ifHighSpeed = dp + "1.3.6.1.2.1.31.1.1.1.15" - self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" - + self.ifInUcastPkts = dp + "1.3.6.1.2.1.2.2.1.11" self.ifInDiscards = dp + "1.3.6.1.2.1.2.2.1.13" - self.ifOutDiscards = dp + "1.3.6.1.2.1.2.2.1.19" self.ifInErrors = dp + "1.3.6.1.2.1.2.2.1.14" + self.ifOutUcastPkts = dp + "1.3.6.1.2.1.2.2.1.17" + self.ifOutDiscards = dp + "1.3.6.1.2.1.2.2.1.19" self.ifOutErrors = dp + "1.3.6.1.2.1.2.2.1.20" + + self.ifXEntry = dp + "1.3.6.1.2.1.31.1.1.1" # For walk_cmd self.ifHCInOctets = dp + "1.3.6.1.2.1.31.1.1.1.6" self.ifHCOutOctets = dp + "1.3.6.1.2.1.31.1.1.1.10" - self.ifInUcastPkts = dp + "1.3.6.1.2.1.2.2.1.11" - self.ifOutUcastPkts = dp + "1.3.6.1.2.1.2.2.1.17" + self.ifHighSpeed = dp + "1.3.6.1.2.1.31.1.1.1.15" + self.ifAlias = dp + "1.3.6.1.2.1.31.1.1.1.18" - # From entity table MIB + # From entity table MIB, refer to https://mibs.observium.org/mib/ENTITY-MIB/ + self.entPhysicalEntry = dp + "1.3.6.1.2.1.47.1.1.1.1" # For walk_cmd self.entPhysDescr = dp + "1.3.6.1.2.1.47.1.1.1.1.2" self.entPhysContainedIn = dp + "1.3.6.1.2.1.47.1.1.1.1.4" self.entPhysClass = dp + "1.3.6.1.2.1.47.1.1.1.1.5" @@ -159,38 +178,43 @@ def __init__(self, dotprefix=False): self.entPhysModelName = dp + "1.3.6.1.2.1.47.1.1.1.1.13" self.entPhysIsFRU = dp + "1.3.6.1.2.1.47.1.1.1.1.16" - # From entity sensor MIB + # From entity sensor MIB, refer to https://mibs.observium.org/mib/ENTITY-SENSOR-MIB/ + self.entPhySensorEntry = dp + "1.3.6.1.2.1.99.1.1.1" # For walk_cmd self.entPhySensorType = dp + "1.3.6.1.2.1.99.1.1.1.1" self.entPhySensorScale = dp + "1.3.6.1.2.1.99.1.1.1.2" self.entPhySensorPrecision = dp + "1.3.6.1.2.1.99.1.1.1.3" self.entPhySensorValue = dp + "1.3.6.1.2.1.99.1.1.1.4" self.entPhySensorOperStatus = dp + "1.3.6.1.2.1.99.1.1.1.5" - # From IP-MIB + # From 
IP-MIB, refer to https://mibs.observium.org/mib/IP-MIB/ + self.ipAddrEntry = dp + "1.3.6.1.2.1.4.20.1" # For walk_cmd self.ipAdEntAddr = dp + "1.3.6.1.2.1.4.20.1.1" self.ipAdEntIfIndex = dp + "1.3.6.1.2.1.4.20.1.2" self.ipAdEntNetMask = dp + "1.3.6.1.2.1.4.20.1.3" - # From LLDP-MIB: lldpLocalSystemData + # From LLDP-MIB: lldpLocalSystemData, refer to https://mibs.observium.org/mib/LLDP-MIB/ self.lldpLocChassisIdSubtype = dp + "1.0.8802.1.1.2.1.3.1" self.lldpLocChassisId = dp + "1.0.8802.1.1.2.1.3.2" self.lldpLocSysName = dp + "1.0.8802.1.1.2.1.3.3" self.lldpLocSysDesc = dp + "1.0.8802.1.1.2.1.3.4" - # From LLDP-MIB: lldpLocPortTable + # From LLDP-MIB: lldpLocPortTable, refer to https://mibs.observium.org/mib/LLDP-MIB/ + self.lldpLocPortEntry = dp + "1.0.8802.1.1.2.1.3.7.1" # For walk_cmd self.lldpLocPortIdSubtype = dp + "1.0.8802.1.1.2.1.3.7.1.2" # + .ifindex self.lldpLocPortId = dp + "1.0.8802.1.1.2.1.3.7.1.3" # + .ifindex self.lldpLocPortDesc = dp + "1.0.8802.1.1.2.1.3.7.1.4" # + .ifindex - # From LLDP-MIB: lldpLocManAddrTables + # From LLDP-MIB: lldpLocManAddrTables, refer to https://mibs.observium.org/mib/LLDP-MIB/ + self.lldpLocManAddrEntry = dp + "1.0.8802.1.1.2.1.3.8.1" # For walk_cmd self.lldpLocManAddrLen = dp + "1.0.8802.1.1.2.1.3.8.1.3" # + .subtype + .man addr self.lldpLocManAddrIfSubtype = dp + \ "1.0.8802.1.1.2.1.3.8.1.4" # + .subtype + .man addr self.lldpLocManAddrIfId = dp + "1.0.8802.1.1.2.1.3.8.1.5" # + .subtype + .man addr self.lldpLocManAddrOID = dp + "1.0.8802.1.1.2.1.3.8.1.6" # + .subtype + .man addr - # From LLDP-MIB: lldpRemTable + # From LLDP-MIB: lldpRemTable, refer to https://mibs.observium.org/mib/LLDP-MIB/ # + .time mark + .ifindex + .rem index + self.lldpRemEntry = dp + "1.0.8802.1.1.2.1.4.1.1" # For walk_cmd self.lldpRemChassisIdSubtype = dp + "1.0.8802.1.1.2.1.4.1.1.4" # + .time mark + .ifindex + .rem index self.lldpRemChassisId = dp + "1.0.8802.1.1.2.1.4.1.1.5" @@ -209,8 +233,9 @@ def __init__(self, dotprefix=False): # + .time mark 
+ .ifindex + .rem index self.lldpRemSysCapEnabled = dp + "1.0.8802.1.1.2.1.4.1.1.12" - # From LLDP-MIB: lldpRemManAddrTable + # From LLDP-MIB: lldpRemManAddrTable, refer to https://mibs.observium.org/mib/LLDP-MIB/ # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr + self.lldpRemManAddrEntry = dp + "1.0.8802.1.1.2.1.4.2.1" # For walk_cmd self.lldpRemManAddrIfSubtype = dp + "1.0.8802.1.1.2.1.4.2.1.3" # + .time mark + .ifindex + .rem index + .addr_subtype + .man addr self.lldpRemManAddrIfId = dp + "1.0.8802.1.1.2.1.4.2.1.4" @@ -232,24 +257,31 @@ def __init__(self, dotprefix=False): self.sysTotalFreeSwap = dp + "1.3.6.1.4.1.2021.4.4.0" # From Cisco private MIB (PFC and queue counters) + # Refer to https://mibs.observium.org/mib/CISCO-PFC-EXT-MIB/ + self.cpfcIfEntry = dp + "1.3.6.1.4.1.9.9.813.1.1.1" # For walk_cmd self.cpfcIfRequests = dp + "1.3.6.1.4.1.9.9.813.1.1.1.1" # + .ifindex self.cpfcIfIndications = dp + "1.3.6.1.4.1.9.9.813.1.1.1.2" # + .ifindex + self.cpfcIfPriorityEntry = dp + "1.3.6.1.4.1.9.9.813.1.2.1" # For walk_cmd self.requestsPerPriority = dp + "1.3.6.1.4.1.9.9.813.1.2.1.2" # + .ifindex.prio self.indicationsPerPriority = dp + "1.3.6.1.4.1.9.9.813.1.2.1.3" # + .ifindex.prio # + .ifindex.IfDirection.QueueID - self.csqIfQosGroupStats = dp + "1.3.6.1.4.1.9.9.580.1.5.5.1.4" + self.csqIfQosGroupStatsEntry = dp + "1.3.6.1.4.1.9.9.580.1.5.5.1" # For walk_cmd + self.csqIfQosGroupStatsValue = dp + "1.3.6.1.4.1.9.9.580.1.5.5.1.4" # From Cisco private MIB (PSU) + # Refer to https://mibs.observium.org/mib/CISCO-ENTITY-FRU-CONTROL-MIB/ + self.cefcFRUPowerStatusEntry = dp + "1.3.6.1.4.1.9.9.117.1.1.2.1" # For walk_cmd self.cefcFRUPowerOperStatus = dp + "1.3.6.1.4.1.9.9.117.1.1.2.1.2" # + .psuindex - # ipCidrRouteTable MIB - self.ipCidrRouteEntry = dp + \ + # ipCidrRouteTable MIB, refer to https://mibs.observium.org/mib/IP-FORWARD-MIB/ + self.ipCidrRouteDest = dp + \ "1.3.6.1.2.1.4.24.4.1.1.0.0.0.0.0.0.0.0.0" # + .next hop IP self.ipCidrRouteStatus = 
dp + \ "1.3.6.1.2.1.4.24.4.1.16.0.0.0.0.0.0.0.0.0" # + .next hop IP - # Dot1q MIB - self.dot1qTpFdbEntry = dp + "1.3.6.1.2.1.17.7.1.2.2.1.2" # + .VLAN.MAC + # Dot1q MIB, refer to https://mibs.observium.org/mib/Q-BRIDGE-MIB/ + self.dot1qTpFdbEntry = dp + "1.3.6.1.2.1.17.7.1.2.2.1" # For walk_cmd + self.dot1qTpFdbPort = dp + "1.3.6.1.2.1.17.7.1.2.2.1.2" # + .VLAN.MAC def decode_hex(hexstring): @@ -278,10 +310,8 @@ def lookup_adminstatus(int_adminstatus): 2: 'down', 3: 'testing' } - if int_adminstatus in adminstatus_options.keys(): - return adminstatus_options[int_adminstatus] - else: - return "" + + return adminstatus_options.get(int_adminstatus, "") def lookup_operstatus(int_operstatus): @@ -294,39 +324,23 @@ def lookup_operstatus(int_operstatus): 6: 'notPresent', 7: 'lowerLayerDown' } - if int_operstatus in operstatus_options.keys(): - return operstatus_options[int_operstatus] - else: - return "" + return operstatus_options.get(int_operstatus, "") def decode_type(module, current_oid, val): - if six.PY3: - tagMap = { - rfc1902.Counter32.tagSet: int, - rfc1902.Gauge32.tagSet: int, - rfc1902.Integer32.tagSet: int, - rfc1902.IpAddress.tagSet: str, - univ.Null.tagSet: str, - univ.ObjectIdentifier.tagSet: str, - rfc1902.OctetString.tagSet: str, - rfc1902.TimeTicks.tagSet: int, - rfc1902.Counter64.tagSet: int - } - else: - tagMap = { - rfc1902.Counter32.tagSet: long, # noqa F821 - rfc1902.Gauge32.tagSet: long, # noqa F821 - rfc1902.Integer32.tagSet: long, # noqa F821 - rfc1902.IpAddress.tagSet: str, - univ.Null.tagSet: str, - univ.ObjectIdentifier.tagSet: str, - rfc1902.OctetString.tagSet: str, - rfc1902.TimeTicks.tagSet: long, # noqa F821 - rfc1902.Counter64.tagSet: long # noqa F821 - } + tagMap = { + rfc1902.Counter32.tagSet: int, + rfc1902.Gauge32.tagSet: int, + rfc1902.Integer32.tagSet: int, + rfc1902.IpAddress.tagSet: str, + univ.Null.tagSet: str, + univ.ObjectIdentifier.tagSet: str, + rfc1902.OctetString.tagSet: str, + rfc1902.TimeTicks.tagSet: int, + 
rfc1902.Counter64.tagSet: int + } - if val is None or not val: + if val is None: module.fail_json( msg="Unable to convert ASN1 type to python type. No value was returned for OID %s" % current_oid) @@ -339,31 +353,20 @@ def decode_type(module, current_oid, val): return pyVal -def main(): - module = AnsibleModule( - argument_spec=dict( - host=dict(required=True), - timeout=dict(reqired=False, type='int', default=5), - version=dict(required=True, choices=['v2', 'v2c', 'v3']), - community=dict(required=False, default=False), - username=dict(required=False), - level=dict(required=False, choices=['authNoPriv', 'authPriv']), - integrity=dict(required=False, choices=['md5', 'sha']), - privacy=dict(required=False, choices=['des', 'aes']), - authkey=dict(required=False), - privkey=dict(required=False), - is_dell=dict(required=False, default=False, type='bool'), - is_eos=dict(required=False, default=False, type='bool'), - include_swap=dict(required=False, default=False, type='bool'), - removeplaceholder=dict(required=False)), - required_together=(['username', 'level', 'integrity', 'authkey'], [ - 'privacy', 'privkey'],), - supports_check_mode=False) +def Tree(): + return defaultdict(Tree) + + +def oid_parent_child(parent, child): + return child.startswith(parent + '.') - m_args = module.params - if not has_pysnmp: - module.fail_json(msg='Missing required pysnmp module (check docs)') +def oid_same(oid1, oid2): + return oid1 == oid2 + + +def main_legacy(module): + m_args = module.params cmdGen = cmdgen.CommandGenerator() @@ -410,8 +413,6 @@ def main(): # Use v without a prefix to use with return values v = DefineOid(dotprefix=False) - def Tree(): return defaultdict(Tree) - results = Tree() # Getting system description could take more than 1 second on some Dell platform @@ -924,8 +925,8 @@ def Tree(): return defaultdict(Tree) errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), - 
cmdgen.MibVariable(p.csqIfQosGroupStats,), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), + cmdgen.MibVariable(p.csqIfQosGroupStatsValue,), lookupMib=False, ) @@ -936,7 +937,7 @@ def Tree(): return defaultdict(Tree) for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.csqIfQosGroupStats in current_oid: + if v.csqIfQosGroupStatsValue in current_oid: ifIndex = int(current_oid.split('.')[-4]) ifDirection = int(current_oid.split('.')[-3]) queueId = int(current_oid.split('.')[-2]) @@ -963,8 +964,8 @@ def Tree(): return defaultdict(Tree) errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.ipCidrRouteEntry,), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), + cmdgen.MibVariable(p.ipCidrRouteDest,), cmdgen.MibVariable(p.ipCidrRouteStatus,), lookupMib=False, ) @@ -976,9 +977,9 @@ def Tree(): return defaultdict(Tree) for oid, val in varBinds: current_oid = oid.prettyPrint() current_val = val.prettyPrint() - if v.ipCidrRouteEntry in current_oid: + if v.ipCidrRouteDest in current_oid: # extract next hop ip from oid - next_hop = current_oid.split(v.ipCidrRouteEntry + ".")[1] + next_hop = current_oid.split(v.ipCidrRouteDest + ".")[1] results['snmp_cidr_route'][next_hop]['route_dest'] = current_val if v.ipCidrRouteStatus in current_oid: next_hop = current_oid.split(v.ipCidrRouteStatus + ".")[1] @@ -1042,8 +1043,8 @@ def Tree(): return defaultdict(Tree) errorIndication, errorStatus, errorIndex, varTable = cmdGen.nextCmd( snmp_auth, - cmdgen.UdpTransportTarget((m_args['host'], 161)), - cmdgen.MibVariable(p.dot1qTpFdbEntry,), + cmdgen.UdpTransportTarget((m_args['host'], 161), timeout=m_args['timeout']), + cmdgen.MibVariable(p.dot1qTpFdbPort,), lookupMib=False, ) @@ -1054,10 +1055,10 @@ def Tree(): return defaultdict(Tree) for oid, val in varBinds: current_oid = 
oid.prettyPrint() current_val = val.prettyPrint() - if v.dot1qTpFdbEntry in current_oid: + if v.dot1qTpFdbPort in current_oid: # extract fdb info from oid items = current_oid.split( - v.dot1qTpFdbEntry + ".")[1].split(".") + v.dot1qTpFdbPort + ".")[1].split(".") # VLAN + MAC(6) if len(items) != 7: continue @@ -1070,4 +1071,824 @@ def Tree(): return defaultdict(Tree) module.exit_json(ansible_facts=results) -main() +class SnmpFactsCollector: + def __init__(self, module): + self.module = module + self.m_args = module.params + self.results = Tree() + self.context = ContextData() + self.snmp_engine = SnmpEngine() + self.transport = None + self.logger = logging.getLogger(__name__) + + self._init_auth() + + # Use p to prefix OIDs with a dot for polling + self.p = DefineOid(dotprefix=True) + # Use v without a prefix to use with return values + self.v = DefineOid(dotprefix=False) + + def _init_auth(self): + # Verify that we receive a community when using snmp v2 + if self.m_args['version'] == "v2" or self.m_args['version'] == "v2c": + if self.m_args['community'] is False: + self.module.fail_json( + msg='Community not set when using snmp version 2' + ) + + if self.m_args['version'] == "v3": + if self.m_args['username'] is None: + self.module.fail_json( + msg='Username not set when using snmp version 3' + ) + + if self.m_args['level'] == "authPriv" and self.m_args['privacy'] is None: + self.module.fail_json( + msg='Privacy algorithm not set when using authPriv' + ) + + if self.m_args['integrity'] == "sha": + integrity_proto = cmdgen.usmHMACSHAAuthProtocol + elif self.m_args['integrity'] == "md5": + integrity_proto = cmdgen.usmHMACMD5AuthProtocol + + if self.m_args['privacy'] == "aes": + privacy_proto = cmdgen.usmAesCfb128Protocol + elif self.m_args['privacy'] == "des": + privacy_proto = cmdgen.usmDESPrivProtocol + + # Use SNMP Version 2 + if self.m_args['version'] == "v2" or self.m_args['version'] == "v2c": + self.snmp_auth = cmdgen.CommunityData(self.m_args['community']) + + 
# Use SNMP Version 3 with authNoPriv + elif self.m_args['level'] == "authNoPriv": + self.snmp_auth = cmdgen.UsmUserData( + self.m_args['username'], + authKey=self.m_args['authkey'], + authProtocol=integrity_proto + ) + # Use SNMP Version 3 with authPriv + else: + self.snmp_auth = cmdgen.UsmUserData( + self.m_args['username'], + authKey=self.m_args['authkey'], + privKey=self.m_args['privkey'], + authProtocol=integrity_proto, + privProtocol=privacy_proto + ) + + async def setup(self): + self.transport = await UdpTransportTarget.create( + (self.m_args['host'], 161), + timeout=self.m_args['timeout'] + ) + + async def _collect_system(self): + self.logger.info("Starting _collect_system") + errorIndication, errorStatus, errorIndex, varBinds = await get_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.sysDescr,)), + ObjectType(ObjectIdentity(self.p.sysObjectId,)), + ObjectType(ObjectIdentity(self.p.sysUpTime,)), + ObjectType(ObjectIdentity(self.p.sysContact,)), + ObjectType(ObjectIdentity(self.p.sysName,)), + ObjectType(ObjectIdentity(self.p.sysLocation,)), + lookupMib=False + ) + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying system information." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if oid_same(current_oid, self.v.sysDescr): + self.results['ansible_sysdescr'] = current_val + elif oid_same(current_oid, self.v.sysObjectId): + self.results['ansible_sysobjectid'] = current_val + elif oid_same(current_oid, self.v.sysUpTime): + self.results['ansible_sysuptime'] = current_val + elif oid_same(current_oid, self.v.sysContact): + self.results['ansible_syscontact'] = current_val + elif oid_same(current_oid, self.v.sysName): + self.results['ansible_sysname'] = current_val + elif oid_same(current_oid, self.v.sysLocation): + self.results['ansible_syslocation'] = current_val + self.logger.info("Finished _collect_system") + + async def _collect_interfaces(self): + self.logger.info("Starting _collect_interfaces") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ifEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ifTable." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.ifIndex, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifindex'] = current_val + elif oid_parent_child(self.v.ifDescr, current_oid): + self.results['snmp_interfaces'][ifIndex]['name'] = current_val + elif oid_parent_child(self.v.ifType, current_oid): + self.results['snmp_interfaces'][ifIndex]['type'] = current_val + elif oid_parent_child(self.v.ifMtu, current_oid): + self.results['snmp_interfaces'][ifIndex]['mtu'] = current_val + elif oid_parent_child(self.v.ifSpeed, current_oid): + self.results['snmp_interfaces'][ifIndex]['speed'] = current_val + elif oid_parent_child(self.v.ifPhysAddress, current_oid): + self.results['snmp_interfaces'][ifIndex]['mac'] = decode_mac(current_val) + elif oid_parent_child(self.v.ifAdminStatus, current_oid): + self.results['snmp_interfaces'][ifIndex]['adminstatus'] = lookup_adminstatus(int(current_val)) + elif oid_parent_child(self.v.ifOperStatus, current_oid): + self.results['snmp_interfaces'][ifIndex]['operstatus'] = lookup_operstatus(int(current_val)) + elif oid_parent_child(self.v.ifInUcastPkts, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifInUcastPkts'] = current_val + elif oid_parent_child(self.v.ifInDiscards, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifInDiscards'] = current_val + elif oid_parent_child(self.v.ifInErrors, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifInErrors'] = current_val + elif oid_parent_child(self.v.ifOutUcastPkts, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifOutUcastPkts'] = current_val + elif oid_parent_child(self.v.ifOutDiscards, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifOutDiscards'] = current_val + elif oid_parent_child(self.v.ifOutErrors, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifOutErrors'] = current_val + + async for 
errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ifXEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ifXTable." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.ifHCInOctets, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifHCInOctets'] = current_val + elif oid_parent_child(self.v.ifHCOutOctets, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifHCOutOctets'] = current_val + elif oid_parent_child(self.v.ifHighSpeed, current_oid): + self.results['snmp_interfaces'][ifIndex]['ifHighSpeed'] = current_val + elif oid_parent_child(self.v.ifAlias, current_oid): + self.results['snmp_interfaces'][ifIndex]['description'] = current_val + self.logger.info("Finished _collect_interfaces") + + async def _collect_physical_entities(self): + self.logger.info("Starting _collect_physical_entities") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.entPhysicalEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying entPhysicalTable." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + entity_oid = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.entPhysDescr, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysDescr'] = current_val + elif oid_parent_child(self.v.entPhysContainedIn, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysContainedIn'] = int(current_val) + elif oid_parent_child(self.v.entPhysClass, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysClass'] = int(current_val) + elif oid_parent_child(self.v.entPhyParentRelPos, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhyParentRelPos'] = int(current_val) + elif oid_parent_child(self.v.entPhysName, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysName'] = current_val + elif oid_parent_child(self.v.entPhysHwVer, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysHwVer'] = current_val + elif oid_parent_child(self.v.entPhysFwVer, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysFwVer'] = current_val + elif oid_parent_child(self.v.entPhysSwVer, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysSwVer'] = current_val + elif oid_parent_child(self.v.entPhysSerialNum, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysSerialNum'] = current_val + elif oid_parent_child(self.v.entPhysMfgName, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysMfgName'] = current_val + elif oid_parent_child(self.v.entPhysModelName, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysModelName'] = current_val + elif oid_parent_child(self.v.entPhysIsFRU, current_oid): + self.results['snmp_physical_entities'][entity_oid]['entPhysIsFRU'] = int(current_val) + self.logger.info("Finished _collect_physical_entities") + + async def 
_collect_sensors(self): + self.logger.info("Starting _collect_sensors") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.entPhySensorEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying entPhySensorEntry." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + sensor_oid = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.entPhySensorType, current_oid): + self.results['snmp_sensors'][sensor_oid]['entPhySensorType'] = current_val + elif oid_parent_child(self.v.entPhySensorScale, current_oid): + self.results['snmp_sensors'][sensor_oid]['entPhySensorScale'] = int(current_val) + elif oid_parent_child(self.v.entPhySensorPrecision, current_oid): + self.results['snmp_sensors'][sensor_oid]['entPhySensorPrecision'] = current_val + elif oid_parent_child(self.v.entPhySensorValue, current_oid): + self.results['snmp_sensors'][sensor_oid]['entPhySensorValue'] = current_val + elif oid_parent_child(self.v.entPhySensorOperStatus, current_oid): + self.results['snmp_sensors'][sensor_oid]['entPhySensorOperStatus'] = current_val + self.logger.info("Finished _collect_sensors") + + async def _collect_ipaddr(self): + self.logger.info("Starting _collect_ipaddr") + ipv4_networks = Tree() + all_ipv4_addresses = [] + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ipAddrEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ipAddrEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + curIPList = current_oid.rsplit('.', 4)[-4:] + curIP = ".".join(curIPList) + + if oid_parent_child(self.v.ipAdEntAddr, current_oid): + ipv4_networks[curIP]['address'] = current_val + all_ipv4_addresses.append(current_val) + elif oid_parent_child(self.v.ipAdEntIfIndex, current_oid): + ipv4_networks[curIP]['interface'] = current_val + elif oid_parent_child(self.v.ipAdEntNetMask, current_oid): + ipv4_networks[curIP]['netmask'] = current_val + + interface_to_ipv4 = {} + for ipv4_network in ipv4_networks: + current_interface = ipv4_networks[ipv4_network]['interface'] + current_network = { + 'address': ipv4_networks[ipv4_network]['address'], + 'netmask': ipv4_networks[ipv4_network]['netmask'] + } + if current_interface not in interface_to_ipv4: + interface_to_ipv4[current_interface] = [] + interface_to_ipv4[current_interface].append(current_network) + else: + interface_to_ipv4[current_interface].append(current_network) + + for interface in interface_to_ipv4: + self.results['snmp_interfaces'][int(interface)]['ipv4'] = interface_to_ipv4[interface] + + self.results['ansible_all_ipv4_addresses'] = all_ipv4_addresses + self.logger.info("Finished _collect_ipaddr") + + async def _collect_lldp_sys(self): + self.logger.info("Starting _collect_lldp_sys") + errorIndication, errorStatus, errorIndex, varBinds = await get_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldpLocChassisIdSubtype,)), + ObjectType(ObjectIdentity(self.p.lldpLocChassisId,)), + ObjectType(ObjectIdentity(self.p.lldpLocSysName,)), + ObjectType(ObjectIdentity(self.p.lldpLocSysDesc,)), + lookupMib=False + ) + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldp system information." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + if oid_same(current_oid, self.v.lldpLocChassisIdSubtype): + self.results['snmp_lldp']['lldpLocChassisIdSubtype'] = current_val + elif oid_same(current_oid, self.v.lldpLocChassisId): + self.results['snmp_lldp']['lldpLocChassisId'] = current_val + elif oid_same(current_oid, self.v.lldpLocSysName): + self.results['snmp_lldp']['lldpLocSysName'] = current_val + elif oid_same(current_oid, self.v.lldpLocSysDesc): + self.results['snmp_lldp']['lldpLocSysDesc'] = current_val + self.logger.info("Finished _collect_lldp_sys") + + async def _collect_lldp_ports(self): + self.logger.info("Starting _collect_lldp_ports") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldpLocPortEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldpLocPortEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.lldpLocPortIdSubtype, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpLocPortIdSubtype'] = current_val + elif oid_parent_child(self.v.lldpLocPortId, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpLocPortId'] = current_val + elif oid_parent_child(self.v.lldpLocPortDesc, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpLocPortDesc'] = current_val + self.logger.info("Finished _collect_lldp_ports") + + async def _collect_lldp_locman(self): + self.logger.info("Starting _collect_lldp_locman") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldpLocManAddrEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldpLocManAddrEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.lldpLocManAddrLen, current_oid): + self.results['snmp_lldp']['lldpLocManAddrLen'] = current_val + elif oid_parent_child(self.v.lldpLocManAddrIfSubtype, current_oid): + self.results['snmp_lldp']['lldpLocManAddrIfSubtype'] = current_val + elif oid_parent_child(self.v.lldpLocManAddrIfId, current_oid): + self.results['snmp_lldp']['lldpLocManAddrIfId'] = current_val + elif oid_parent_child(self.v.lldpLocManAddrOID, current_oid): + self.results['snmp_lldp']['lldpLocManAddrOID'] = current_val + self.logger.info("Finished _collect_lldp_locman") + + async def _collect_lldp_rem(self): + self.logger.info("Starting _collect_lldp_rem") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldpRemEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldpRemEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.split('.')[12]) + + if oid_parent_child(self.v.lldpRemChassisIdSubtype, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemChassisIdSubtype'] = current_val + elif oid_parent_child(self.v.lldpRemChassisId, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemChassisId'] = current_val + elif oid_parent_child(self.v.lldpRemPortIdSubtype, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemPortIdSubtype'] = current_val + elif oid_parent_child(self.v.lldpRemPortId, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemPortId'] = current_val + elif oid_parent_child(self.v.lldpRemPortDesc, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemPortDesc'] = current_val + elif oid_parent_child(self.v.lldpRemSysName, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemSysName'] = current_val + elif oid_parent_child(self.v.lldpRemSysDesc, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemSysDesc'] = current_val + elif oid_parent_child(self.v.lldpRemSysCapSupported, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemSysCapSupported'] = current_val + elif oid_parent_child(self.v.lldpRemSysCapEnabled, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemSysCapEnabled'] = current_val + self.logger.info("Finished _collect_lldp_rem") + + async def _collect_lldp_rem_man_addr(self): + self.logger.info("Starting _collect_lldp_rem_man_addr") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.lldpRemManAddrEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying lldpRemManAddrEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.split('.')[12]) + + if oid_parent_child(self.v.lldpRemManAddrIfSubtype, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemManAddrIfSubtype'] = current_val + elif oid_parent_child(self.v.lldpRemManAddrIfId, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemManAddrIfId'] = current_val + elif oid_parent_child(self.v.lldpRemManAddrOID, current_oid): + self.results['snmp_interfaces'][ifIndex]['lldpRemManAddrOID'] = current_val + self.logger.info("Finished _collect_lldp_rem_man_addr") + + async def _collect_dell_cpu(self): + self.logger.info("Starting _collect_dell_cpu") + if self.m_args['is_dell']: + errorIndication, errorStatus, errorIndex, varBinds = await get_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ChStackUnitCpuUtil5sec,)), + lookupMib=False + ) + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ChStackUnitCpuUtil5sec" + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + if oid_same(current_oid, self.v.ChStackUnitCpuUtil5sec): + self.results['ansible_ChStackUnitCpuUtil5sec'] = decode_type(self.module, current_oid, val) + self.logger.info("Finished _collect_dell_cpu") + + async def _collect_sys_mem(self): + self.logger.info("Starting _collect_sys_mem") + if not self.m_args['is_eos']: + errorIndication, errorStatus, errorIndex, varBinds = await get_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.sysTotalMemory,)), + ObjectType(ObjectIdentity(self.p.sysTotalFreeMemory,)), + ObjectType(ObjectIdentity(self.p.sysTotalSharedMemory,)), + ObjectType(ObjectIdentity(self.p.sysTotalBuffMemory,)), + ObjectType(ObjectIdentity(self.p.sysCachedMemory,)), + lookupMib=False + ) + if errorIndication: + self.module.fail_json( + 
msg=f"{str(errorIndication)} querying system memory." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + if oid_same(current_oid, self.v.sysTotalMemory): + self.results['ansible_sysTotalMemory'] = decode_type(self.module, current_oid, val) + elif oid_same(current_oid, self.v.sysTotalFreeMemory): + self.results['ansible_sysTotalFreeMemory'] = decode_type(self.module, current_oid, val) + elif oid_same(current_oid, self.v.sysTotalSharedMemory): + self.results['ansible_sysTotalSharedMemory'] = decode_type(self.module, current_oid, val) + elif oid_same(current_oid, self.v.sysTotalBuffMemory): + self.results['ansible_sysTotalBuffMemory'] = decode_type(self.module, current_oid, val) + elif oid_same(current_oid, self.v.sysCachedMemory): + self.results['ansible_sysCachedMemory'] = decode_type(self.module, current_oid, val) + self.logger.info("Finished _collect_sys_mem") + + async def _collect_swap(self): + self.logger.info("Starting _collect_swap") + if self.m_args['include_swap']: + errorIndication, errorStatus, errorIndex, varBinds = await get_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.sysTotalSwap,)), + ObjectType(ObjectIdentity(self.p.sysTotalFreeSwap,)), + lookupMib=False + ) + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying swap." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + if oid_same(current_oid, self.v.sysTotalSwap): + self.results['ansible_sysTotalSwap'] = decode_type(self.module, current_oid, val) + elif oid_same(current_oid, self.v.sysTotalFreeSwap): + self.results['ansible_sysTotalFreeSwap'] = decode_type(self.module, current_oid, val) + self.logger.info("Finished _collect_swap") + + async def _collect_cisco_pfc_if(self): + self.logger.info("Starting _collect_cisco_pfc_if") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.cpfcIfEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying cpfcIfEntry." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + ifIndex = int(current_oid.rsplit('.', 1)[-1]) + + if oid_parent_child(self.v.cpfcIfRequests, current_oid): + self.results['snmp_interfaces'][ifIndex]['cpfcIfRequests'] = current_val + elif oid_parent_child(self.v.cpfcIfIndications, current_oid): + self.results['snmp_interfaces'][ifIndex]['cpfcIfIndications'] = current_val + self.logger.info("Finished _collect_cisco_pfc_if") + + async def _collect_cisco_pfc_priority(self): + self.logger.info("Starting _collect_cisco_pfc_priority") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.cpfcIfPriorityEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying cpfcIfPriorityEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.requestsPerPriority, current_oid): + ifIndex = int(current_oid.split('.')[-2]) + prio = int(current_oid.split('.')[-1]) + self.results['snmp_interfaces'][ifIndex]['requestsPerPriority'][prio] = current_val + elif oid_parent_child(self.v.indicationsPerPriority, current_oid): + ifIndex = int(current_oid.split('.')[-2]) + prio = int(current_oid.split('.')[-1]) + self.results['snmp_interfaces'][ifIndex]['indicationsPerPriority'][prio] = current_val + self.logger.info("Finished _collect_cisco_pfc_priority") + + async def _collect_cisco_qos(self): + self.logger.info("Starting _collect_cisco_qos") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.csqIfQosGroupStatsEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying csqIfQosGroupStatsEntry." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.csqIfQosGroupStatsValue, current_oid): + ifIndex = int(current_oid.split('.')[-4]) + ifDirection = int(current_oid.split('.')[-3]) + queueId = int(current_oid.split('.')[-2]) + counterId = int(current_oid.split('.')[-1]) + self.results['snmp_interfaces'][ifIndex]['queues'][ifDirection][queueId][counterId] = current_val + self.logger.info("Finished _collect_cisco_qos") + + async def _collect_cisco_psu(self): + self.logger.info("Starting _collect_cisco_psu") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.cefcFRUPowerStatusEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying cefcFRUPowerStatusEntry." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.cefcFRUPowerOperStatus, current_oid): + psuIndex = int(current_oid.split('.')[-1]) + self.results['snmp_psu'][psuIndex]['operstatus'] = current_val + self.logger.info("Finished _collect_cisco_psu") + + async def _collect_ip_route(self): + self.logger.info("Starting _collect_ip_route") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ipCidrRouteDest)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ipCidrRouteDest." 
+ ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.ipCidrRouteDest, current_oid): + next_hop = current_oid.split(self.v.ipCidrRouteDest + ".")[1] + self.results['snmp_cidr_route'][next_hop]['route_dest'] = current_val + + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.ipCidrRouteStatus)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying ipCidrRouteStatus." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.ipCidrRouteStatus, current_oid): + next_hop = current_oid.split(self.v.ipCidrRouteStatus + ".")[1] + self.results['snmp_cidr_route'][next_hop]['status'] = current_val + + self.logger.info("Finished _collect_ip_route") + + async def _collect_fdb(self): + self.logger.info("Starting _collect_fdb") + async for errorIndication, errorStatus, errorIndex, varBinds in walk_cmd( + self.snmp_engine, + self.snmp_auth, + self.transport, + ContextData(), + ObjectType(ObjectIdentity(self.p.dot1qTpFdbEntry)), + lookupMib=False, + lexicographicMode=False + ): + if errorIndication: + self.module.fail_json( + msg=f"{str(errorIndication)} querying dot1qTpFdbEntry." + ) + + for oid, val in varBinds: + current_oid = oid.prettyPrint() + current_val = val.prettyPrint() + + if oid_parent_child(self.v.dot1qTpFdbPort, current_oid): + # extract fdb info from oid + items = current_oid.split(self.v.dot1qTpFdbPort + ".")[1].split(".") + # VLAN + MAC(6) + if len(items) != 7: + continue + mac_str = "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}".format( + int(items[1]), int(items[2]), int(items[3]), int(items[4]), int(items[5]), int(items[6]) + ) + # key must be string + key = items[0] + '.' 
+ mac_str + self.results['snmp_fdb'][key] = current_val + self.logger.info("Finished _collect_fdb") + + async def collect_all(self): + if self.transport is None: + raise Exception("Transport not initialized. Call setup() first.") + await asyncio.gather( + self._collect_system(), + self._collect_interfaces(), + self._collect_physical_entities(), + self._collect_sensors(), + self._collect_ipaddr(), + self._collect_lldp_sys(), + self._collect_lldp_ports(), + self._collect_lldp_locman(), + self._collect_lldp_rem(), + self._collect_lldp_rem_man_addr(), + self._collect_dell_cpu(), + self._collect_sys_mem(), + self._collect_swap(), + self._collect_cisco_pfc_if(), + self._collect_cisco_pfc_priority(), + self._collect_cisco_qos(), + self._collect_cisco_psu(), + self._collect_ip_route(), + self._collect_fdb() + ) + + +async def main(module): + collector = SnmpFactsCollector(module) + await collector.setup() + await collector.collect_all() + module.exit_json(ansible_facts=collector.results) + + +if __name__ == "__main__": + module = AnsibleModule( + argument_spec=dict( + host=dict(required=True), + # https://github.com/sonic-net/sonic-buildimage/blob/7a21cab07dbd0ace80833a57e391dec0ebde9978/dockers/docker-snmp/snmpd.conf.j2#L197 + # In snmpd.conf, we set the timeout as 5s and 4 retries. 
+ # Total time window = 4 * 5 = 20 seconds + timeout=dict(required=False, type='int', default=20), + version=dict(required=True, choices=['v2', 'v2c', 'v3']), + community=dict(required=False, default=False), + username=dict(required=False), + level=dict(required=False, choices=['authNoPriv', 'authPriv']), + integrity=dict(required=False, choices=['md5', 'sha']), + privacy=dict(required=False, choices=['des', 'aes']), + authkey=dict(required=False), + privkey=dict(required=False), + is_dell=dict(required=False, default=False, type='bool'), + is_eos=dict(required=False, default=False, type='bool'), + include_swap=dict(required=False, default=False, type='bool'), + removeplaceholder=dict(required=False) + ), + required_together=( + ['username', 'level', 'integrity', 'authkey'], + ['privacy', 'privkey'], + ), + supports_check_mode=False + ) + + timestamp = datetime.datetime.now().isoformat() + config_module_logging(f'snmp_facts_{module.params["host"]}_{timestamp}') + + if pysnmp.version[0] < 5: + main_legacy(module) + else: + asyncio.run(main(module)) diff --git a/ansible/roles/vm_set/tasks/add_ceos_list.yml b/ansible/roles/vm_set/tasks/add_ceos_list.yml index 1d03e3af3b7..a928f52e160 100644 --- a/ansible/roles/vm_set/tasks/add_ceos_list.yml +++ b/ansible/roles/vm_set/tasks/add_ceos_list.yml @@ -140,7 +140,7 @@ retries: 10 delay: 30 -- name: Create network for ceos container net_{{ vm_set_name }}_{{ vm_name }} +- name: Create network for net ceos container become: yes ceos_network: name: net_{{ vm_set_name }}_{{ vm_name }} diff --git a/ansible/roles/vm_set/tasks/main.yml b/ansible/roles/vm_set/tasks/main.yml index 69bbae918ef..70bde00fd07 100644 --- a/ansible/roles/vm_set/tasks/main.yml +++ b/ansible/roles/vm_set/tasks/main.yml @@ -102,6 +102,7 @@ pkg: - python3-libvirt - python3-pip + - python3-lxml - libvirt-daemon-system - qemu-system-x86 become: yes diff --git a/ansible/roles/vm_set/tasks/stop_sonic_vm.yml b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml index 
93e66e8a3a6..742fd443958 100644 --- a/ansible/roles/vm_set/tasks/stop_sonic_vm.yml +++ b/ansible/roles/vm_set/tasks/stop_sonic_vm.yml @@ -1,6 +1,6 @@ - set_fact: sonic_vm_storage_location: "{{ home_path }}/sonic-vm" - when: sonic_vm_storage_location is not defined + when: sonic_vm_storage_location is not defined - set_fact: disk_image: "{{ sonic_vm_storage_location }}/disks/sonic_{{ dut_name }}.img" diff --git a/tests/common/devices/base.py b/tests/common/devices/base.py index 3179ca88a06..33ebe2871b9 100644 --- a/tests/common/devices/base.py +++ b/tests/common/devices/base.py @@ -1,7 +1,7 @@ import inspect import json import logging - +import collections from multiprocessing.pool import ThreadPool from tests.common.errors import RunAnsibleModuleFail @@ -36,6 +36,8 @@ class CustomEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, bytes): return obj.decode('utf-8') + elif isinstance(obj, collections.UserDict): + return obj.data return super().default(obj) def __init__(self, ansible_adhoc, hostname, *args, **kwargs): @@ -103,6 +105,7 @@ def run_module(module_args, complex_args): module_args = json.loads(json.dumps(module_args, cls=AnsibleHostBase.CustomEncoder)) complex_args = json.loads(json.dumps(complex_args, cls=AnsibleHostBase.CustomEncoder)) res = self.module(*module_args, **complex_args)[self.hostname] + res.encoder = AnsibleHostBase.CustomEncoder if verbose: logger.debug( diff --git a/tests/common/plugins/ansible_fixtures.py b/tests/common/plugins/ansible_fixtures.py index 3e12700f526..eda40cf0cce 100644 --- a/tests/common/plugins/ansible_fixtures.py +++ b/tests/common/plugins/ansible_fixtures.py @@ -2,6 +2,14 @@ import pytest from pytest_ansible.host_manager import get_host_manager +try: + # Initialize ansible plugin loader to avoid issues with ansible-core 2.18 + from ansible.plugins.loader import init_plugin_loader + init_plugin_loader() +except ImportError: + # Nothing needs to be done for ansible-core 2.13 + pass + # Here we override 
ansible_adhoc fixture from pytest-ansible plugin to overcome # scope limitation issue; since we want to be able to use ansible_adhoc in module/class scope diff --git a/tests/conftest.py b/tests/conftest.py index def82052904..26631ecfba9 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,7 +2,6 @@ import glob import json import logging -import getpass import random import re @@ -227,32 +226,6 @@ def enhance_inventory(request): logger.error("Failed to set enhanced 'ansible_inventory' to request.config.option") -def pytest_cmdline_main(config): - - # Filter out unnecessary pytest_ansible plugin log messages - pytest_ansible_logger = logging.getLogger("pytest_ansible") - if pytest_ansible_logger: - pytest_ansible_logger.setLevel(logging.WARNING) - - # Filter out unnecessary ansible log messages (ansible v2.8) - # The logger name of ansible v2.8 is nasty - mypid = str(os.getpid()) - user = getpass.getuser() - ansible_loggerv28 = logging.getLogger("p=%s u=%s | " % (mypid, user)) - if ansible_loggerv28: - ansible_loggerv28.setLevel(logging.WARNING) - - # Filter out unnecessary ansible log messages (latest ansible) - ansible_logger = logging.getLogger("ansible") - if ansible_logger: - ansible_logger.setLevel(logging.WARNING) - - # Filter out unnecessary logs generated by calling the ptfadapter plugin - dataplane_logger = logging.getLogger("dataplane") - if dataplane_logger: - dataplane_logger.setLevel(logging.ERROR) - - def pytest_collection(session): """Workaround to reduce messy plugin logs generated during collection only diff --git a/tests/ptf_runner.py b/tests/ptf_runner.py index 80f12e19c6a..1a7a1a7f4ff 100644 --- a/tests/ptf_runner.py +++ b/tests/ptf_runner.py @@ -97,7 +97,11 @@ def ptf_runner(host, testdir, testname, platform_dir=None, params={}, # when ptf cmd execution result is 0 (success), we need to skip collecting pcap file ptf_collect(host, log_file, result is not None and result.get("rc", -1) == 0) if result: - allure.attach(json.dumps(result, 
indent=4), 'ptf_console_result', allure.attachment_type.TEXT) + allure.attach( + json.dumps(result, indent=4, cls=result.encoder), + 'ptf_console_result', + allure.attachment_type.TEXT + ) if module_ignore_errors: if result["rc"] != 0: return result diff --git a/tests/test_nbr_health.py b/tests/test_nbr_health.py index 20f03590d05..cddfaba6c66 100644 --- a/tests/test_nbr_health.py +++ b/tests/test_nbr_health.py @@ -28,7 +28,7 @@ def check_snmp(hostname, mgmt_addr, localhost, community, is_eos): def check_eos_facts(hostname, mgmt_addr, host): logger.info("Check neighbor {} eos facts".format(hostname)) res = host.eos_facts(gather_subset=["!config"]) - logger.info("facts: {}".format(json.dumps(res, indent=4))) + logger.info("facts: {}".format(json.dumps(res, indent=4, cls=res.encoder))) try: eos_facts = res['ansible_facts'] except Exception as e: From 8f3c595d93ca23ad04d322f68317440d58bb72f6 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Tue, 11 Nov 2025 15:06:41 +0800 Subject: [PATCH 2/2] Filter out unnecessary dataplane logs PR https://github.com/sonic-net/sonic-mgmt/pull/21045 for fixing compatibility issues of docker-sonic-mgmt upgrade removed the hook for setting log levels of couple of loggers. The code for setting log level of ansible loggers is no longer needed after previous ansible upgrade a few years ago. However, setting log level for the 'dataplane' logger is still necessary. This change added back the code to set log level for the 'dataplane' logger to avoid unnecessary logs when 'ptfadapter' is used in test script. 
Signed-off-by: Xin Wang --- tests/conftest.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 26631ecfba9..b53b0c02943 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -226,6 +226,14 @@ def enhance_inventory(request): logger.error("Failed to set enhanced 'ansible_inventory' to request.config.option") +def pytest_cmdline_main(config): + + # Filter out unnecessary logs generated by calling the ptfadapter plugin + dataplane_logger = logging.getLogger("dataplane") + if dataplane_logger: + dataplane_logger.setLevel(logging.ERROR) + + def pytest_collection(session): """Workaround to reduce messy plugin logs generated during collection only