diff --git a/cfgmgr/Makefile.am b/cfgmgr/Makefile.am index 685ab04407c..09fda145fce 100644 --- a/cfgmgr/Makefile.am +++ b/cfgmgr/Makefile.am @@ -5,7 +5,7 @@ LIBNL_LIBS = -lnl-genl-3 -lnl-route-3 -lnl-3 SAIMETA_LIBS = -lsaimeta -lsaimetadata -lzmq COMMON_LIBS = -lswsscommon -bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd +bin_PROGRAMS = vlanmgrd teammgrd portmgrd intfmgrd buffermgrd vrfmgrd nbrmgrd vxlanmgrd sflowmgrd natmgrd coppmgrd tunnelmgrd macsecmgrd fabricmgrd cfgmgrdir = $(datadir)/swss @@ -46,6 +46,11 @@ portmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLA portmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) portmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) +fabricmgrd_SOURCES = fabricmgrd.cpp fabricmgr.cpp $(COMMON_ORCH_SOURCE) shellcmd.h +fabricmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +fabricmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) +fabricmgrd_LDADD = $(LDFLAGS_ASAN) $(COMMON_LIBS) $(SAIMETA_LIBS) + intfmgrd_SOURCES = intfmgrd.cpp intfmgr.cpp $(top_srcdir)/lib/subintf.cpp $(COMMON_ORCH_SOURCE) shellcmd.h intfmgrd_CFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) intfmgrd_CPPFLAGS = $(DBGFLAGS) $(AM_CFLAGS) $(CFLAGS_COMMON) $(CFLAGS_SAI) $(CFLAGS_ASAN) @@ -110,6 +115,7 @@ natmgrd_LDADD += -lgcovpreload coppmgrd_LDADD += -lgcovpreload tunnelmgrd_LDADD += -lgcovpreload macsecmgrd_LDADD += -lgcovpreload +fabricmgrd_LDADD += -lgcovpreload endif if ASAN_ENABLED @@ -126,5 +132,6 @@ natmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp coppmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp tunnelmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp macsecmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp +fabricmgrd_SOURCES += $(top_srcdir)/lib/asan.cpp endif diff --git a/cfgmgr/coppmgr.cpp b/cfgmgr/coppmgr.cpp 
index 5aa3b06b4e2..cfa94988d90 100644 --- a/cfgmgr/coppmgr.cpp +++ b/cfgmgr/coppmgr.cpp @@ -285,7 +285,7 @@ bool CoppMgr::isDupEntry(const std::string &key, std::vector &f if ((!field_found) || (field_found && preserved_copp_it->second.compare(value))) { // overwrite -> delete preserved entry from copp table and set a new entry instead - m_coppTable.del(key); + m_appCoppTable.del(key); return false; } } @@ -415,7 +415,7 @@ CoppMgr::CoppMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, c auto copp_it = supported_copp_keys.find(it); if (copp_it == supported_copp_keys.end()) { - m_coppTable.del(it); + m_appCoppTable.del(it); } } } diff --git a/cfgmgr/fabricmgr.cpp b/cfgmgr/fabricmgr.cpp new file mode 100644 index 00000000000..bcbaa5726af --- /dev/null +++ b/cfgmgr/fabricmgr.cpp @@ -0,0 +1,119 @@ +#include "logger.h" +#include "dbconnector.h" +#include "producerstatetable.h" +#include "tokenize.h" +#include "ipprefix.h" +#include "fabricmgr.h" +#include "exec.h" +#include "shellcmd.h" +#include + +using namespace std; +using namespace swss; + +FabricMgr::FabricMgr(DBConnector *cfgDb, DBConnector *appDb, const vector &tableNames) : + Orch(cfgDb, tableNames), + m_cfgFabricMonitorTable(cfgDb, CFG_FABRIC_MONITOR_DATA_TABLE_NAME), + m_cfgFabricPortTable(cfgDb, CFG_FABRIC_MONITOR_PORT_TABLE_NAME), + m_appFabricMonitorTable(appDb, APP_FABRIC_MONITOR_DATA_TABLE_NAME), + m_appFabricPortTable(appDb, APP_FABRIC_MONITOR_PORT_TABLE_NAME) +{ +} + +void FabricMgr::doTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto table = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string key = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + + string monErrThreshCrcCells, monErrThreshRxCells; + string monPollThreshRecovery, monPollThreshIsolation; + string isolateStatus; + string alias, lanes; + std::vector field_values; + string value; + + for 
(auto i : kfvFieldsValues(t)) + { + if (fvField(i) == "monErrThreshCrcCells") + { + monErrThreshCrcCells = fvValue(i); + writeConfigToAppDb(key, "monErrThreshCrcCells", monErrThreshCrcCells); + } + else if (fvField(i) == "monErrThreshRxCells") + { + monErrThreshRxCells = fvValue(i); + writeConfigToAppDb(key, "monErrThreshRxCells", monErrThreshRxCells); + } + else if (fvField(i) == "monPollThreshRecovery") + { + monPollThreshRecovery = fvValue(i); + writeConfigToAppDb(key, "monPollThreshRecovery", monPollThreshRecovery); + } + else if (fvField(i) == "monPollThreshIsolation") + { + monPollThreshIsolation = fvValue(i); + writeConfigToAppDb(key, "monPollThreshIsolation", monPollThreshIsolation); + } + else if (fvField(i) == "alias") + { + alias = fvValue(i); + writeConfigToAppDb(key, "alias", alias); + } + else if (fvField(i) == "lanes") + { + lanes = fvValue(i); + writeConfigToAppDb(key, "lanes", lanes); + } + else if (fvField(i) == "isolateStatus") + { + isolateStatus = fvValue(i); + writeConfigToAppDb(key, "isolateStatus", isolateStatus); + } + else + { + field_values.emplace_back(i); + } + } + + for (auto &entry : field_values) + { + writeConfigToAppDb(key, fvField(entry), fvValue(entry)); + } + + } + it = consumer.m_toSync.erase(it); + } +} + +bool FabricMgr::writeConfigToAppDb(const std::string &key, const std::string &field, const std::string &value) +{ + vector fvs; + FieldValueTuple fv(field, value); + fvs.push_back(fv); + if (key == "FABRIC_MONITOR_DATA") + { + m_appFabricMonitorTable.set(key, fvs); + SWSS_LOG_NOTICE("Write FABRIC_MONITOR:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + } + else + { + m_appFabricPortTable.set(key, fvs); + SWSS_LOG_NOTICE("Write FABRIC_PORT:%s %s to %s", key.c_str(), field.c_str(), value.c_str()); + } + + return true; +} + + diff --git a/cfgmgr/fabricmgr.h b/cfgmgr/fabricmgr.h new file mode 100644 index 00000000000..dbe2fd0d897 --- /dev/null +++ b/cfgmgr/fabricmgr.h @@ -0,0 +1,30 @@ +#pragma once + +#include 
"dbconnector.h" +#include "orch.h" +#include "producerstatetable.h" + +#include +#include +#include + +namespace swss { + + +class FabricMgr : public Orch +{ +public: + FabricMgr(DBConnector *cfgDb, DBConnector *appDb, const std::vector &tableNames); + + using Orch::doTask; +private: + Table m_cfgFabricMonitorTable; + Table m_cfgFabricPortTable; + Table m_appFabricMonitorTable; + Table m_appFabricPortTable; + + void doTask(Consumer &consumer); + bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); +}; + +} diff --git a/cfgmgr/fabricmgrd.cpp b/cfgmgr/fabricmgrd.cpp new file mode 100644 index 00000000000..3d0777e17c3 --- /dev/null +++ b/cfgmgr/fabricmgrd.cpp @@ -0,0 +1,73 @@ +#include +#include +#include +#include +#include + +#include "exec.h" +#include "fabricmgr.h" +#include "schema.h" +#include "select.h" + +using namespace std; +using namespace swss; + +/* select() function timeout retry time, in millisecond */ +#define SELECT_TIMEOUT 1000 + +int main(int argc, char **argv) +{ + Logger::linkToDbNative("fabricmgrd"); + SWSS_LOG_ENTER(); + + SWSS_LOG_NOTICE("--- Starting fabricmgrd ---"); + + try + { + vector cfg_fabric_tables = { + CFG_FABRIC_MONITOR_DATA_TABLE_NAME, + CFG_FABRIC_MONITOR_PORT_TABLE_NAME, + }; + + DBConnector cfgDb("CONFIG_DB", 0); + DBConnector appDb("APPL_DB", 0); + + FabricMgr fabricmgr(&cfgDb, &appDb, cfg_fabric_tables); + + // TODO: add tables in stateDB which interface depends on to monitor list + vector cfgOrchList = {&fabricmgr}; + + swss::Select s; + for (Orch *o : cfgOrchList) + { + s.addSelectables(o->getSelectables()); + } + + while (true) + { + Selectable *sel; + int ret; + + ret = s.select(&sel, SELECT_TIMEOUT); + if (ret == Select::ERROR) + { + SWSS_LOG_NOTICE("Error: %s!", strerror(errno)); + continue; + } + if (ret == Select::TIMEOUT) + { + fabricmgr.doTask(); + continue; + } + + auto *c = (Executor *)sel; + c->execute(); + } + } + catch (const exception &e) + { + 
SWSS_LOG_ERROR("Runtime error: %s", e.what()); + } + return -1; +} + diff --git a/cfgmgr/portmgr.cpp b/cfgmgr/portmgr.cpp index 5134b31861c..19ba41dc909 100644 --- a/cfgmgr/portmgr.cpp +++ b/cfgmgr/portmgr.cpp @@ -14,8 +14,10 @@ using namespace swss; PortMgr::PortMgr(DBConnector *cfgDb, DBConnector *appDb, DBConnector *stateDb, const vector &tableNames) : Orch(cfgDb, tableNames), m_cfgPortTable(cfgDb, CFG_PORT_TABLE_NAME), + m_cfgSendToIngressPortTable(cfgDb, CFG_SEND_TO_INGRESS_PORT_TABLE_NAME), m_cfgLagMemberTable(cfgDb, CFG_LAG_MEMBER_TABLE_NAME), m_statePortTable(stateDb, STATE_PORT_TABLE_NAME), + m_appSendToIngressPortTable(appDb, APP_SEND_TO_INGRESS_PORT_TABLE_NAME), m_appPortTable(appDb, APP_PORT_TABLE_NAME) { } @@ -93,11 +95,49 @@ bool PortMgr::isPortStateOk(const string &alias) return false; } +void PortMgr::doSendToIngressPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + KeyOpFieldsValuesTuple t = it->second; + + string alias = kfvKey(t); + string op = kfvOp(t); + auto fvs = kfvFieldsValues(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_NOTICE("Add SendToIngress Port: %s", + alias.c_str()); + m_appSendToIngressPortTable.set(alias, fvs); + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_NOTICE("Removing SendToIngress Port: %s", + alias.c_str()); + m_appSendToIngressPortTable.del(alias); + } + else + { + SWSS_LOG_ERROR("Unknown operation type %s", op.c_str()); + } + it = consumer.m_toSync.erase(it); + } + +} + void PortMgr::doTask(Consumer &consumer) { SWSS_LOG_ENTER(); auto table = consumer.getTableName(); + if (table == CFG_SEND_TO_INGRESS_PORT_TABLE_NAME) + { + doSendToIngressPortTask(consumer); + return; + } auto it = consumer.m_toSync.begin(); while (it != consumer.m_toSync.end()) @@ -146,7 +186,7 @@ void PortMgr::doTask(Consumer &consumer) { admin_status = fvValue(i); } - else + else { field_values.emplace_back(i); } diff --git a/cfgmgr/portmgr.h b/cfgmgr/portmgr.h 
index 683aacc488c..3d6f0365bff 100644 --- a/cfgmgr/portmgr.h +++ b/cfgmgr/portmgr.h @@ -22,13 +22,16 @@ class PortMgr : public Orch using Orch::doTask; private: Table m_cfgPortTable; + Table m_cfgSendToIngressPortTable; Table m_cfgLagMemberTable; Table m_statePortTable; ProducerStateTable m_appPortTable; + ProducerStateTable m_appSendToIngressPortTable; std::set m_portList; void doTask(Consumer &consumer); + void doSendToIngressPortTask(Consumer &consumer); bool writeConfigToAppDb(const std::string &alias, const std::string &field, const std::string &value); bool writeConfigToAppDb(const std::string &alias, std::vector &field_values); bool setPortMtu(const std::string &alias, const std::string &mtu); diff --git a/cfgmgr/portmgrd.cpp b/cfgmgr/portmgrd.cpp index 99c7974559b..4d04b42d383 100644 --- a/cfgmgr/portmgrd.cpp +++ b/cfgmgr/portmgrd.cpp @@ -26,6 +26,7 @@ int main(int argc, char **argv) { vector cfg_port_tables = { CFG_PORT_TABLE_NAME, + CFG_SEND_TO_INGRESS_PORT_TABLE_NAME, }; DBConnector cfgDb("CONFIG_DB", 0); diff --git a/orchagent/Makefile.am b/orchagent/Makefile.am index 12347d954f0..2af9d4aa0d7 100644 --- a/orchagent/Makefile.am +++ b/orchagent/Makefile.am @@ -59,6 +59,7 @@ orchagent_SOURCES = \ mplsrouteorch.cpp \ neighorch.cpp \ intfsorch.cpp \ + port/port_capabilities.cpp \ port/porthlpr.cpp \ portsorch.cpp \ fabricportsorch.cpp \ @@ -110,6 +111,8 @@ orchagent_SOURCES = \ dash/dashrouteorch.cpp \ dash/dashvnetorch.cpp \ dash/dashaclorch.cpp \ + dash/dashaclgroupmgr.cpp \ + dash/dashtagmgr.cpp \ dash/pbutils.cpp orchagent_SOURCES += flex_counter/flex_counter_manager.cpp flex_counter/flex_counter_stat_manager.cpp flex_counter/flow_counter_handler.cpp flex_counter/flowcounterrouteorch.cpp diff --git a/orchagent/aclorch.h b/orchagent/aclorch.h index f713aba3b39..abeaf519e2e 100644 --- a/orchagent/aclorch.h +++ b/orchagent/aclorch.h @@ -86,9 +86,9 @@ #define IP_TYPE_IP "IP" #define IP_TYPE_NON_IP "NON_IP" #define IP_TYPE_IPv4ANY "IPV4ANY" -#define 
IP_TYPE_NON_IPv4 "NON_IPv4" +#define IP_TYPE_NON_IPv4 "NON_IPV4" #define IP_TYPE_IPv6ANY "IPV6ANY" -#define IP_TYPE_NON_IPv6 "NON_IPv6" +#define IP_TYPE_NON_IPv6 "NON_IPV6" #define IP_TYPE_ARP "ARP" #define IP_TYPE_ARP_REQUEST "ARP_REQUEST" #define IP_TYPE_ARP_REPLY "ARP_REPLY" diff --git a/orchagent/bufferorch.cpp b/orchagent/bufferorch.cpp index f4df8541c36..91f13578a6f 100644 --- a/orchagent/bufferorch.cpp +++ b/orchagent/bufferorch.cpp @@ -827,7 +827,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple) return task_process_status::task_invalid_entry; } - if(tokens[0] == gMyHostName) + if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) { local_port = true; local_port_name = tokens[2]; diff --git a/orchagent/dash/dashaclgroupmgr.cpp b/orchagent/dash/dashaclgroupmgr.cpp new file mode 100644 index 00000000000..22a730e7fe9 --- /dev/null +++ b/orchagent/dash/dashaclgroupmgr.cpp @@ -0,0 +1,734 @@ +#include + +#include + +#include "dashaclgroupmgr.h" + +#include "crmorch.h" +#include "dashorch.h" +#include "dashaclorch.h" +#include "saihelper.h" +#include "pbutils.h" + +extern sai_dash_acl_api_t* sai_dash_acl_api; +extern sai_dash_eni_api_t* sai_dash_eni_api; +extern sai_object_id_t gSwitchId; +extern CrmOrch *gCrmOrch; + +using namespace std; +using namespace swss; +using namespace dash::acl_in; +using namespace dash::acl_out; +using namespace dash::acl_rule; +using namespace dash::acl_group; +using namespace dash::tag; +using namespace dash::types; + +const static vector all_protocols(boost::counting_iterator(0), boost::counting_iterator(UINT8_MAX + 1)); +const static vector all_ports = {{numeric_limits::min(), numeric_limits::max()}}; + +bool from_pb(const AclRule& data, DashAclRule& rule) +{ + rule.m_priority = data.priority(); + rule.m_action = (data.action() == ACTION_PERMIT) ? 
DashAclRule::Action::ALLOW : DashAclRule::Action::DENY; + rule.m_terminating = data.terminating(); + + if (data.protocol_size()) + { + rule.m_protocols.reserve(data.protocol_size()); + rule.m_protocols.assign(data.protocol().begin(), data.protocol().end()); + } + + if (!to_sai(data.src_addr(), rule.m_src_prefixes)) + { + return false; + } + + if (!to_sai(data.dst_addr(), rule.m_dst_prefixes)) + { + return false; + } + + if (data.src_tag_size()) + { + rule.m_src_tags.insert(data.src_tag().begin(), data.src_tag().end()); + } + + if (data.dst_tag_size()) + { + rule.m_dst_tags.insert(data.dst_tag().begin(), data.dst_tag().end()); + } + + if (!data.src_port_size()) + { + rule.m_src_ports = all_ports; + } + else if (!to_sai(data.src_port(), rule.m_src_ports)) + { + return false; + } + + if (!data.dst_port_size()) + { + rule.m_dst_ports = all_ports; + } + else if (!to_sai(data.dst_port(), rule.m_dst_ports)) + { + return false; + } + + return true; +} + +bool from_pb(const dash::acl_group::AclGroup &data, DashAclGroup& group) +{ + if (!to_sai(data.ip_version(), group.m_ip_version)) + { + return false; + } + + return true; +} + +sai_attr_id_t getSaiStage(DashAclDirection d, sai_ip_addr_family_t f, DashAclStage s) +{ + const static map, sai_attr_id_t> StageMaps = + { + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE1}, SAI_ENI_ATTR_INBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE2}, SAI_ENI_ATTR_INBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE3}, SAI_ENI_ATTR_INBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE4}, SAI_ENI_ATTR_INBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE5}, SAI_ENI_ATTR_INBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE1}, 
SAI_ENI_ATTR_INBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE2}, SAI_ENI_ATTR_INBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE3}, SAI_ENI_ATTR_INBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE4}, SAI_ENI_ATTR_INBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::IN, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE5}, SAI_ENI_ATTR_INBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE1}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE2}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE3}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE4}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV4, DashAclStage::STAGE5}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE1}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE2}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE3}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE4}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, + {{DashAclDirection::OUT, SAI_IP_ADDR_FAMILY_IPV6, DashAclStage::STAGE5}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, + }; + + auto stage = StageMaps.find({d, f, s}); + if (stage == StageMaps.end()) + { + SWSS_LOG_ERROR("Invalid stage %d %d %d", static_cast(d), f, static_cast(s)); + throw 
runtime_error("Invalid stage"); + } + + return stage->second; +} + +DashAclGroupMgr::DashAclGroupMgr(DashOrch *dashorch, DashAclOrch *aclorch) : + m_dash_orch(dashorch), + m_dash_acl_orch(aclorch) +{ + SWSS_LOG_ENTER(); +} + +void DashAclGroupMgr::init(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; + + for (auto& rule: group.m_dash_acl_rule_table) + { + rule.second.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; + } +} + +void DashAclGroupMgr::create(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + vector attrs; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY; + attrs.back().value.s32 = group.m_ip_version; + + auto status = sai_dash_acl_api->create_dash_acl_group(&group.m_dash_acl_group_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ACL group: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiCreateStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; + gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); +} + +task_process_status DashAclGroupMgr::create(const string& group_id, DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + if (exists(group_id)) + { + return task_failed; + } + + create(group); + + m_groups_table.emplace(group_id, group); + + SWSS_LOG_INFO("Created ACL group %s", group_id.c_str()); + + return task_success; +} + +void DashAclGroupMgr::remove(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + if (group.m_dash_acl_group_id == SAI_NULL_OBJECT_ID) + { + return; + } + + sai_status_t status = sai_dash_acl_api->remove_dash_acl_group(group.m_dash_acl_group_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ACL group: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; + gCrmOrch->decCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); + + group.m_dash_acl_group_id = SAI_NULL_OBJECT_ID; +} + +task_process_status DashAclGroupMgr::remove(const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("ACL group %s doesn't exist", group_id.c_str()); + return task_success; + } + + auto& group = group_it->second; + + if (!group.m_dash_acl_rule_table.empty()) + { + SWSS_LOG_ERROR("ACL group %s still has %zu rules", group_id.c_str(), group.m_dash_acl_rule_table.size()); + return task_need_retry; + } + + if (isBound(group)) + { + SWSS_LOG_ERROR("ACL group %s still has %zu references", group_id.c_str(), group.m_in_tables.size() + group.m_out_tables.size()); + return task_need_retry; + } + + remove(group); + + m_groups_table.erase(group_id); + SWSS_LOG_INFO("Removed ACL group %s", group_id.c_str()); + + return task_success; +} + +bool DashAclGroupMgr::exists(const string& group_id) const +{ + SWSS_LOG_ENTER(); + + return m_groups_table.find(group_id) != m_groups_table.end(); +} + +void DashAclGroupMgr::onUpdate(const string& group_id, const string& tag_id, const DashTag& tag) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + return; + } + + auto& group = group_it->second; + if (isBound(group)) + { + // If the group is bound to at least one ENI refresh the full group to update the affected rules. + // When the group is bound to the ENI we need to make sure that the update of the affected rules will be atomic. + SWSS_LOG_INFO("Update full ACL group %s", group_id.c_str()); + + refreshAclGroupFull(group_id); + } + else + { + // If the group is not bound to ENI update the rule immediately. 
+ SWSS_LOG_INFO("Update ACL group %s", group_id.c_str()); + for (auto& rule_it: group.m_dash_acl_rule_table) + { + auto& rule = rule_it.second; + if (rule.m_src_tags.find(tag_id) != rule.m_src_tags.end() || rule.m_dst_tags.find(tag_id) != rule.m_dst_tags.end()) + { + removeRule(group, rule); + createRule(group, rule); + } + } + } +} + +void DashAclGroupMgr::refreshAclGroupFull(const string &group_id) +{ + SWSS_LOG_ENTER(); + + auto& group = m_groups_table[group_id]; + + DashAclGroup new_group = group; + init(new_group); + create(new_group); + + for (auto& rule: new_group.m_dash_acl_rule_table) + { + createRule(new_group, rule.second); + } + + for (const auto& table: new_group.m_in_tables) + { + const auto& eni_id = table.first; + const auto& stages = table.second; + + const auto eni = m_dash_orch->getEni(eni_id); + ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); + + for (const auto& stage: stages) + { + bind(new_group, *eni, DashAclDirection::IN, stage); + } + } + + for (const auto& table: new_group.m_out_tables) + { + const auto& eni_id = table.first; + const auto& stages = table.second; + + const auto eni = m_dash_orch->getEni(eni_id); + ABORT_IF_NOT(eni != nullptr, "Failed to get ENI %s", eni_id.c_str()); + + for (const auto& stage: stages) + { + bind(new_group, *eni, DashAclDirection::OUT, stage); + } + } + + removeAclGroupFull(group); + + group = new_group; +} + +void DashAclGroupMgr::removeAclGroupFull(DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + for (auto& rule: group.m_dash_acl_rule_table) + { + removeRule(group, rule.second); + } + + remove(group); +} + +void DashAclGroupMgr::createRule(DashAclGroup& group, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + vector attrs; + vector src_prefixes = {}; + vector dst_prefixes = {}; + + auto any_ip = [] (const auto& g) + { + sai_ip_prefix_t ip_prefix = {}; + ip_prefix.addr_family = g.isIpV4() ? 
SAI_IP_ADDR_FAMILY_IPV4 : SAI_IP_ADDR_FAMILY_IPV6; + return ip_prefix; + }; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PRIORITY; + attrs.back().value.u32 = rule.m_priority; + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_ACTION; + + if (rule.m_action == DashAclRule::Action::ALLOW) + { + attrs.back().value.s32 = rule.m_terminating ? + SAI_DASH_ACL_RULE_ACTION_PERMIT : SAI_DASH_ACL_RULE_ACTION_PERMIT_AND_CONTINUE; + } + else + { + attrs.back().value.s32 = rule.m_terminating ? + SAI_DASH_ACL_RULE_ACTION_DENY : SAI_DASH_ACL_RULE_ACTION_DENY_AND_CONTINUE; + } + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PROTOCOL; + + vector protocols; + if (rule.m_protocols.size()) { + protocols = rule.m_protocols; + } else { + protocols = all_protocols; + } + + attrs.back().value.u8list.count = static_cast(protocols.size()); + attrs.back().value.u8list.list = protocols.data(); + + if (!rule.m_src_prefixes.empty()) + { + src_prefixes.insert(src_prefixes.end(), + rule.m_src_prefixes.begin(), rule.m_src_prefixes.end()); + } + + if (!rule.m_dst_prefixes.empty()) + { + dst_prefixes.insert(dst_prefixes.end(), + rule.m_dst_prefixes.begin(), rule.m_dst_prefixes.end()); + } + + for (const auto &tag : rule.m_src_tags) + { + const auto& prefixes = m_dash_acl_orch->getDashAclTagMgr().getPrefixes(tag); + + src_prefixes.insert(src_prefixes.end(), + prefixes.begin(), prefixes.end()); + } + + for (const auto &tag : rule.m_dst_tags) + { + const auto& prefixes = m_dash_acl_orch->getDashAclTagMgr().getPrefixes(tag); + + dst_prefixes.insert(dst_prefixes.end(), + prefixes.begin(), prefixes.end()); + } + + if (src_prefixes.empty()) + { + src_prefixes.push_back(any_ip(group)); + } + + if (dst_prefixes.empty()) + { + dst_prefixes.push_back(any_ip(group)); + } + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SIP; + attrs.back().value.ipprefixlist.count = static_cast(src_prefixes.size()); + 
attrs.back().value.ipprefixlist.list = src_prefixes.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DIP; + attrs.back().value.ipprefixlist.count = static_cast(dst_prefixes.size()); + attrs.back().value.ipprefixlist.list = dst_prefixes.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SRC_PORT; + attrs.back().value.u16rangelist.count = static_cast(rule.m_src_ports.size()); + attrs.back().value.u16rangelist.list = rule.m_src_ports.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DST_PORT; + attrs.back().value.u16rangelist.count = static_cast(rule.m_dst_ports.size()); + attrs.back().value.u16rangelist.list = rule.m_dst_ports.data(); + + attrs.emplace_back(); + attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID; + attrs.back().value.oid = group.m_dash_acl_group_id; + + auto status = sai_dash_acl_api->create_dash_acl_rule(&rule.m_dash_acl_rule_id, gSwitchId, static_cast(attrs.size()), attrs.data()); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to create ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); + handleSaiCreateStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_rtype = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? 
+ CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; + gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, group.m_dash_acl_group_id); +} + +task_process_status DashAclGroupMgr::createRule(const string& group_id, const string& rule_id, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("ACL group %s doesn't exist, waiting for group creating before creating rule %s", group_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + auto& group = group_it->second; + + auto acl_rule_it = group.m_dash_acl_rule_table.find(rule_id); + ABORT_IF_NOT(acl_rule_it == group.m_dash_acl_rule_table.end(), "Failed to create ACL rule %s. Rule already exist in ACL group %s", rule_id.c_str(), group_id.c_str()); + + createRule(group, rule); + + group.m_dash_acl_rule_table.emplace(rule_id, rule); + attachTags(group_id, rule.m_src_tags); + attachTags(group_id, rule.m_dst_tags); + + SWSS_LOG_INFO("Created ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); + + return task_success; +} + +task_process_status DashAclGroupMgr::updateRule(const string& group_id, const string& rule_id, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + if (isBound(group_id)) + { + SWSS_LOG_INFO("Failed to update dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); + return task_failed; + } + + if (ruleExists(group_id, rule_id)) + { + removeRule(group_id, rule_id); + } + + createRule(group_id, rule_id, rule); + + return task_success; +} + +void DashAclGroupMgr::removeRule(DashAclGroup& group, DashAclRule& rule) +{ + SWSS_LOG_ENTER(); + + if (rule.m_dash_acl_rule_id == SAI_NULL_OBJECT_ID) + { + return; + } + + // Remove the ACL group + auto status = sai_dash_acl_api->remove_dash_acl_rule(rule.m_dash_acl_rule_id); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to remove ACL rule: %d, %s", status, sai_serialize_status(status).c_str()); + 
handleSaiRemoveStatus((sai_api_t)SAI_API_DASH_ACL, status); + } + + CrmResourceType crm_resource = (group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? + CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; + gCrmOrch->decCrmDashAclUsedCounter(crm_resource, group.m_dash_acl_group_id); + + rule.m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; +} + +task_process_status DashAclGroupMgr::removeRule(const string& group_id, const string& rule_id) +{ + SWSS_LOG_ENTER(); + + if (!exists(group_id) || !ruleExists(group_id, rule_id)) + { + SWSS_LOG_INFO("ACL rule %s:%s does not exists", group_id.c_str(), rule_id.c_str()); + return task_success; + } + + auto& group = m_groups_table[group_id]; + if (isBound(group)) + { + SWSS_LOG_INFO("Failed to remove dash ACL rule %s:%s, ACL group is bound to the ENI", group_id.c_str(), rule_id.c_str()); + return task_need_retry; + } + + auto& rule = group.m_dash_acl_rule_table[rule_id]; + + removeRule(group, rule); + + detachTags(group_id, rule.m_src_tags); + detachTags(group_id, rule.m_dst_tags); + + group.m_dash_acl_rule_table.erase(rule_id); + + SWSS_LOG_INFO("Removed ACL rule %s:%s", group_id.c_str(), rule_id.c_str()); + + return task_success; +} + +void DashAclGroupMgr::bind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = getSaiStage(direction, group.m_ip_version, stage); + attr.value.oid = group.m_dash_acl_group_id; + + auto status = sai_dash_eni_api->set_eni_attribute(eni.eni_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to bind ACL group to ENI: %d", status); + handleSaiSetStatus((sai_api_t)SAI_API_DASH_ENI, status); + } +} + +bool DashAclGroupMgr::ruleExists(const string& group_id, const string& rule_id) const +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + return false; + } + + return 
group_it->second.m_dash_acl_rule_table.find(rule_id) != group_it->second.m_dash_acl_rule_table.end(); +} + +task_process_status DashAclGroupMgr::bind(const string& group_id, const string& eni_id, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + SWSS_LOG_INFO("Failed to bind ACL group %s to ENI %s. ACL group does not exist", group_id.c_str(), eni_id.c_str()); + return task_need_retry; + } + + auto& group = group_it->second; + + if (group.m_dash_acl_rule_table.empty()) + { + SWSS_LOG_INFO("ACL group %s has no rules attached. Waiting for ACL rules creation", group_id.c_str()); + return task_need_retry; + } + + auto eni = m_dash_orch->getEni(eni_id); + if (!eni) + { + SWSS_LOG_INFO("eni %s cannot be found", eni_id.c_str()); + return task_need_retry; + } + + bind(group, *eni, direction, stage); + + auto& table = (direction == DashAclDirection::IN) ? group.m_in_tables : group.m_out_tables; + auto& eni_stages = table[eni_id]; + + eni_stages.insert(stage); + + SWSS_LOG_INFO("Bound ACL group %s to ENI %s", group_id.c_str(), eni_id.c_str()); + + return task_success; +} + +void DashAclGroupMgr::unbind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + + attr.id = getSaiStage(direction, group.m_ip_version, stage); + attr.value.oid = SAI_NULL_OBJECT_ID; + + auto status = sai_dash_eni_api->set_eni_attribute(eni.eni_id, &attr); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to unbind ACL group from ENI: %d", status); + handleSaiSetStatus((sai_api_t)SAI_API_DASH_ENI, status); + } +} + +task_process_status DashAclGroupMgr::unbind(const string& group_id, const string& eni_id, DashAclDirection direction, DashAclStage stage) +{ + SWSS_LOG_ENTER(); + + auto group_it = m_groups_table.find(group_id); + if (group_it == m_groups_table.end()) + { + 
SWSS_LOG_INFO("ACL group %s does not exist", group_id.c_str()); + return task_success; + } + + auto& group = group_it->second; + + auto eni_entry = m_dash_orch->getEni(eni_id); + if (!eni_entry) + { + SWSS_LOG_INFO("eni %s cannot be found", eni_id.c_str()); + return task_success; + } + + auto& table = (direction == DashAclDirection::IN) ? group.m_in_tables : group.m_out_tables; + auto eni_it = table.find(eni_id); + if (eni_it == table.end()) + { + SWSS_LOG_INFO("ACL group %s is not bound to ENI %s", group_id.c_str(), eni_id.c_str()); + return task_success; + } + + auto& eni_stages = eni_it->second; + if (eni_stages.find(stage) == eni_stages.end()) + { + SWSS_LOG_INFO("ACL group %s is not bound to ENI %s stage %d", group_id.c_str(), eni_id.c_str(), static_cast(stage)); + return task_success; + } + + unbind(group, *eni_entry, direction, stage); + + eni_stages.erase(stage); + if (eni_stages.empty()) + { + table.erase(eni_it); + } + + return task_success; +} + +bool DashAclGroupMgr::isBound(const string &group_id) +{ + SWSS_LOG_ENTER(); + + if (!exists(group_id)) + { + return false; + } + + return isBound(m_groups_table[group_id]); +} + +bool DashAclGroupMgr::isBound(const DashAclGroup& group) +{ + SWSS_LOG_ENTER(); + + return !group.m_in_tables.empty() || !group.m_out_tables.empty(); +} + +void DashAclGroupMgr::attachTags(const string &group_id, const unordered_set& tags) +{ + SWSS_LOG_ENTER(); + + for (const auto& tag_id : tags) + { + m_dash_acl_orch->getDashAclTagMgr().attach(tag_id, group_id); + } +} + +void DashAclGroupMgr::detachTags(const string &group_id, const unordered_set& tags) +{ + SWSS_LOG_ENTER(); + + for (const auto& tag_id : tags) + { + m_dash_acl_orch->getDashAclTagMgr().detach(tag_id, group_id); + } +} diff --git a/orchagent/dash/dashaclgroupmgr.h b/orchagent/dash/dashaclgroupmgr.h new file mode 100644 index 00000000000..6ef9498cd29 --- /dev/null +++ b/orchagent/dash/dashaclgroupmgr.h @@ -0,0 +1,124 @@ +#pragma once + +#include + +#include +#include 
+#include + +#include "dashorch.h" +#include "dashtagmgr.h" + +#include "dash_api/acl_group.pb.h" +#include "dash_api/acl_rule.pb.h" +#include "dash_api/acl_in.pb.h" +#include "dash_api/acl_out.pb.h" + +enum class DashAclStage +{ + STAGE1, + STAGE2, + STAGE3, + STAGE4, + STAGE5, +}; + +enum class DashAclDirection +{ + IN, + OUT, +}; + +struct DashAclRule +{ + sai_object_id_t m_dash_acl_rule_id = SAI_NULL_OBJECT_ID; + + enum class Action + { + ALLOW, + DENY, + }; + + sai_uint32_t m_priority; + Action m_action; + bool m_terminating; + std::vector m_protocols; + std::vector m_src_prefixes; + std::vector m_dst_prefixes; + std::unordered_set m_src_tags; + std::unordered_set m_dst_tags; + std::vector m_src_ports; + std::vector m_dst_ports; +}; + +struct DashAclGroup +{ + using EniTable = std::unordered_map>; + using RuleTable = std::unordered_map; + using RuleKeys = std::unordered_set; + sai_object_id_t m_dash_acl_group_id = SAI_NULL_OBJECT_ID; + + std::string m_guid; + sai_ip_addr_family_t m_ip_version; + RuleTable m_dash_acl_rule_table; + + EniTable m_in_tables; + EniTable m_out_tables; + + bool isIpV4() const + { + return m_ip_version == SAI_IP_ADDR_FAMILY_IPV4; + } + + bool isIpV6() const + { + return m_ip_version == SAI_IP_ADDR_FAMILY_IPV6; + } +}; + +bool from_pb(const dash::acl_rule::AclRule& data, DashAclRule& rule); +bool from_pb(const dash::acl_group::AclGroup &data, DashAclGroup& group); + +class DashAclOrch; + +class DashAclGroupMgr +{ + DashOrch *m_dash_orch; + DashAclOrch *m_dash_acl_orch; + std::unordered_map m_groups_table; + +public: + DashAclGroupMgr(DashOrch *dashorch, DashAclOrch *aclorch); + + task_process_status create(const std::string& group_id, DashAclGroup& group); + task_process_status remove(const std::string& group_id); + bool exists(const std::string& group_id) const; + + void onUpdate(const std::string& group_id, const std::string& tag_id,const DashTag& tag); + + task_process_status createRule(const std::string& group_id, const std::string& 
rule_id, DashAclRule& rule); + task_process_status updateRule(const std::string& group_id, const std::string& rule_id, DashAclRule& rule); + task_process_status removeRule(const std::string& group_id, const std::string& rule_id); + bool ruleExists(const std::string& group_id, const std::string& rule_id) const; + + task_process_status bind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); + task_process_status unbind(const std::string& group_id, const std::string& eni_id, DashAclDirection direction, DashAclStage stage); + +private: + void init(DashAclGroup& group); + void create(DashAclGroup& group); + void remove(DashAclGroup& group); + + void createRule(DashAclGroup& group, DashAclRule& rule); + void removeRule(DashAclGroup& group, DashAclRule& rule); + + void bind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); + void unbind(const DashAclGroup& group, const EniEntry& eni, DashAclDirection direction, DashAclStage stage); + bool isBound(const std::string &group_id); + bool isBound(const DashAclGroup& group); + void attachTags(const std::string &group_id, const std::unordered_set& tags); + void detachTags(const std::string &group_id, const std::unordered_set& tags); + + void refreshAclGroupFull(const std::string &group_id); + void removeAclGroupFull(DashAclGroup& group); +}; diff --git a/orchagent/dash/dashaclorch.cpp b/orchagent/dash/dashaclorch.cpp index 31022dc4a48..9c14d637196 100644 --- a/orchagent/dash/dashaclorch.cpp +++ b/orchagent/dash/dashaclorch.cpp @@ -5,14 +5,12 @@ #include -#include -#include - #include "dashaclorch.h" #include "taskworker.h" #include "pbutils.h" #include "crmorch.h" -#include "dashaclorch.h" +#include "dashaclgroupmgr.h" +#include "dashtagmgr.h" #include "saihelper.h" using namespace std; @@ -21,18 +19,12 @@ using namespace dash::acl_in; using namespace dash::acl_out; using namespace dash::acl_rule; using namespace dash::acl_group; 
+using namespace dash::tag; using namespace dash::types; -extern sai_dash_acl_api_t* sai_dash_acl_api; -extern sai_dash_eni_api_t* sai_dash_eni_api; -extern sai_object_id_t gSwitchId; -extern CrmOrch *gCrmOrch; - template static bool extractVariables(const string &input, char delimiter, T &output, Args &... args) { - SWSS_LOG_ENTER(); - const auto tokens = swss::tokenize(input, delimiter); try { @@ -45,49 +37,63 @@ static bool extractVariables(const string &input, char delimiter, T &output, Arg } } -sai_attr_id_t getSaiStage(DashAclDirection d, sai_ip_addr_family_t f, uint32_t s) +namespace swss { + +template<> +inline void lexical_convert(const string &buffer, DashAclStage &stage) { - const static map, sai_attr_id_t> StageMaps = - { - {{IN, SAI_IP_ADDR_FAMILY_IPV4, 1}, SAI_ENI_ATTR_INBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV4, 2}, SAI_ENI_ATTR_INBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV4, 3}, SAI_ENI_ATTR_INBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV4, 4}, SAI_ENI_ATTR_INBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV4, 5}, SAI_ENI_ATTR_INBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV6, 1}, SAI_ENI_ATTR_INBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV6, 2}, SAI_ENI_ATTR_INBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV6, 3}, SAI_ENI_ATTR_INBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV6, 4}, SAI_ENI_ATTR_INBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, - {{IN, SAI_IP_ADDR_FAMILY_IPV6, 5}, SAI_ENI_ATTR_INBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV4, 1}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE1_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV4, 2}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE2_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV4, 3}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE3_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV4, 4}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE4_DASH_ACL_GROUP_ID}, - {{OUT, 
SAI_IP_ADDR_FAMILY_IPV4, 5}, SAI_ENI_ATTR_OUTBOUND_V4_STAGE5_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV6, 1}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE1_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV6, 2}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE2_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV6, 3}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE3_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV6, 4}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE4_DASH_ACL_GROUP_ID}, - {{OUT, SAI_IP_ADDR_FAMILY_IPV6, 5}, SAI_ENI_ATTR_OUTBOUND_V6_STAGE5_DASH_ACL_GROUP_ID}, - }; - - auto stage = StageMaps.find({d, f, s}); - if (stage == StageMaps.end()) + SWSS_LOG_ENTER(); + + if (buffer == "1") + { + stage = DashAclStage::STAGE1; + } + else if (buffer == "2") + { + stage = DashAclStage::STAGE2; + } + else if (buffer == "3") { - SWSS_LOG_WARN("Invalid stage %d %d %d", d, f, s); - throw runtime_error("Invalid stage"); + stage = DashAclStage::STAGE3; } + else if (buffer == "4") + { + stage = DashAclStage::STAGE4; + } + else if (buffer == "5") + { + stage = DashAclStage::STAGE5; + } + else + { + SWSS_LOG_ERROR("Invalid stage : %s", buffer.c_str()); + throw invalid_argument("Invalid stage"); + } + +} - return stage->second; } DashAclOrch::DashAclOrch(DBConnector *db, const vector &tables, DashOrch *dash_orch, ZmqServer *zmqServer) : ZmqOrch(db, tables, zmqServer), - m_dash_orch(dash_orch) + m_dash_orch(dash_orch), + m_group_mgr(dash_orch, this), + m_tag_mgr(this) + { SWSS_LOG_ENTER(); +} - assert(m_dash_orch); +DashAclGroupMgr& DashAclOrch::getDashAclGroupMgr() +{ + SWSS_LOG_ENTER(); + return m_group_mgr; +} + +DashTagMgr& DashAclOrch::getDashAclTagMgr() +{ + SWSS_LOG_ENTER(); + return m_tag_mgr; } void DashAclOrch::doTask(ConsumerBase &consumer) @@ -97,13 +103,15 @@ void DashAclOrch::doTask(ConsumerBase &consumer) const static TaskMap TaskMap = { PbWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclIn, this), KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, 
DEL_COMMAND, &DashAclOrch::taskRemoveDashAclIn, this), - PbWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclOut, this), - KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_IN_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclOut, this), + PbWorker::makeMemberTask(APP_DASH_ACL_OUT_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclOut, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_OUT_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclOut, this), PbWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclGroup, this), KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_GROUP_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclGroup, this), PbWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashAclRule, this), KeyOnlyWorker::makeMemberTask(APP_DASH_ACL_RULE_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashAclRule, this), - }; + PbWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, SET_COMMAND, &DashAclOrch::taskUpdateDashPrefixTag, this), + KeyOnlyWorker::makeMemberTask(APP_DASH_PREFIX_TAG_TABLE_NAME, DEL_COMMAND, &DashAclOrch::taskRemoveDashPrefixTag, this), + }; const string &table_name = consumer.getTableName(); auto itr = consumer.m_toSync.begin(); @@ -161,23 +169,19 @@ task_process_status DashAclOrch::taskUpdateDashAclIn( { SWSS_LOG_ENTER(); - task_process_status v4_status = task_success, v6_status = task_success; - if (!data.v4_acl_group_id().empty()) - { - v4_status = bindAclToEni(m_dash_acl_in_table, key, data.v4_acl_group_id()); - } - if (v4_status != task_success) - { - return v4_status; - } - if (!data.v6_acl_group_id().empty()) + for (const auto& gid: { data.v4_acl_group_id(), data.v6_acl_group_id() }) { - v6_status = bindAclToEni(m_dash_acl_in_table, key, data.v6_acl_group_id()); - } - if (v6_status != task_success) - { - return v6_status; + if (gid.empty()) + { + continue; + } + auto status = 
bindAclToEni(DashAclDirection::IN, key, gid); + if (status != task_success) + { + return status; + } } + return task_success; } @@ -186,7 +190,7 @@ task_process_status DashAclOrch::taskRemoveDashAclIn( { SWSS_LOG_ENTER(); - return unbindAclFromEni(m_dash_acl_in_table, key); + return unbindAclFromEni(DashAclDirection::IN, key); } task_process_status DashAclOrch::taskUpdateDashAclOut( @@ -195,23 +199,19 @@ task_process_status DashAclOrch::taskUpdateDashAclOut( { SWSS_LOG_ENTER(); - task_process_status v4_status = task_success, v6_status = task_success; - if (!data.v4_acl_group_id().empty()) - { - v4_status = bindAclToEni(m_dash_acl_out_table, key, data.v4_acl_group_id()); - } - if (v4_status != task_success) - { - return v4_status; - } - if (!data.v6_acl_group_id().empty()) - { - v6_status = bindAclToEni(m_dash_acl_out_table, key, data.v6_acl_group_id()); - } - if (v6_status != task_success) + for (const auto& gid: { data.v4_acl_group_id(), data.v6_acl_group_id() }) { - return v6_status; + if (gid.empty()) + { + continue; + } + auto status = bindAclToEni(DashAclDirection::OUT, key, gid); + if (status != task_success) + { + return status; + } } + return task_success; } @@ -220,7 +220,7 @@ task_process_status DashAclOrch::taskRemoveDashAclOut( { SWSS_LOG_ENTER(); - return unbindAclFromEni(m_dash_acl_out_table, key); + return unbindAclFromEni(DashAclDirection::OUT, key); } task_process_status DashAclOrch::taskUpdateDashAclGroup( @@ -229,40 +229,19 @@ task_process_status DashAclOrch::taskUpdateDashAclGroup( { SWSS_LOG_ENTER(); - if (m_dash_acl_group_table.find(key) != m_dash_acl_group_table.end()) + if (m_group_mgr.exists(key)) { - // Update the ACL group's attributes SWSS_LOG_WARN("Cannot update attributes of ACL group %s", key.c_str()); return task_failed; } - sai_ip_addr_family_t ip_version = data.ip_version() == IpVersion::IP_VERSION_IPV4 ? 
SAI_IP_ADDR_FAMILY_IPV4 : SAI_IP_ADDR_FAMILY_IPV6; - vector attrs; - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY; - attrs.back().value.s32 = ip_version; - - // Guid wasn't mapping to any SAI attributes - - // Create a new ACL group - DashAclGroupEntry acl_group; - sai_status_t status = sai_dash_acl_api->create_dash_acl_group(&acl_group.m_dash_acl_group_id, gSwitchId, static_cast(attrs.size()), attrs.data()); - if (status != SAI_STATUS_SUCCESS) + DashAclGroup group = {}; + if (!from_pb(data, group)) { - SWSS_LOG_WARN("Failed to create ACL group %s, rv: %s", key.c_str(), sai_serialize_status(status).c_str()); return task_failed; } - acl_group.m_rule_count = 0; - acl_group.m_ref_count = 0; - acl_group.m_ip_version = ip_version; - m_dash_acl_group_table.emplace(key, acl_group); - SWSS_LOG_NOTICE("Created ACL group %s", key.c_str()); - - CrmResourceType crm_rtype = (acl_group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? - CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; - gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, acl_group.m_dash_acl_group_id); - return task_success; + return m_group_mgr.create(key, group); } task_process_status DashAclOrch::taskRemoveDashAclGroup( @@ -270,43 +249,7 @@ task_process_status DashAclOrch::taskRemoveDashAclGroup( { SWSS_LOG_ENTER(); - auto acl_group = getAclGroup(key); - - if (acl_group == nullptr || acl_group->m_dash_acl_group_id == SAI_NULL_OBJECT_ID) - { - SWSS_LOG_WARN("ACL group %s doesn't exist", key.c_str()); - return task_success; - } - - // The member rules of group should be removed first - if (acl_group->m_rule_count != 0) - { - SWSS_LOG_INFO("ACL group %s still has %d rules", key.c_str(), acl_group->m_rule_count); - return task_need_retry; - } - - // The refer count of group should be cleaned first - if (acl_group->m_ref_count != 0) - { - SWSS_LOG_INFO("ACL group %s still has %d references", key.c_str(), acl_group->m_ref_count); - return task_need_retry; - 
} - - // Remove the ACL group - sai_status_t status = sai_dash_acl_api->remove_dash_acl_group(acl_group->m_dash_acl_group_id); - if (status != SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("Failed to remove ACL group %s, rv: %s", key.c_str(), sai_serialize_status(status).c_str()); - return task_failed; - } - CrmResourceType crm_rtype = (acl_group->m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? - CrmResourceType::CRM_DASH_IPV4_ACL_GROUP : CrmResourceType::CRM_DASH_IPV6_ACL_GROUP; - gCrmOrch->decCrmDashAclUsedCounter(crm_rtype, acl_group->m_dash_acl_group_id); - - m_dash_acl_group_table.erase(key); - SWSS_LOG_NOTICE("Removed ACL group %s", key.c_str()); - - return task_success; + return m_group_mgr.remove(key); } task_process_status DashAclOrch::taskUpdateDashAclRule( @@ -318,183 +261,25 @@ task_process_status DashAclOrch::taskUpdateDashAclRule( string group_id, rule_id; if (!extractVariables(key, ':', group_id, rule_id)) { - SWSS_LOG_WARN("Failed to parse key %s", key.c_str()); + SWSS_LOG_ERROR("Failed to parse key %s", key.c_str()); return task_failed; } - auto acl_group = getAclGroup(group_id); - if (acl_group == nullptr) - { - SWSS_LOG_INFO("ACL group %s doesn't exist, waiting for group creating before creating rule %s", group_id.c_str(), rule_id.c_str()); - return task_need_retry; - } - - if (m_dash_acl_rule_table.find(key) != m_dash_acl_rule_table.end()) - { - // Remove the old ACL rule - auto status = taskRemoveDashAclRule(key); - if (status != task_success) - { - return status; - } - } - - vector attrs; - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PRIORITY; - attrs.back().value.u32 = data.priority(); + DashAclRule rule = {}; - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_ACTION; - if (data.action() == Action::ACTION_PERMIT) - { - if (data.terminating()) - { - attrs.back().value.s32 = SAI_DASH_ACL_RULE_ACTION_PERMIT; - } - else - { - attrs.back().value.s32 = SAI_DASH_ACL_RULE_ACTION_PERMIT_AND_CONTINUE; - } - } - else + if 
(!from_pb(data, rule)) { - if (data.terminating()) - { - attrs.back().value.s32 = SAI_DASH_ACL_RULE_ACTION_DENY; - } - else - { - attrs.back().value.s32 = SAI_DASH_ACL_RULE_ACTION_DENY_AND_CONTINUE; - } + return task_failed; } - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_PROTOCOL; - vector protocols; - if (data.protocol_size() == 0) + if (m_group_mgr.ruleExists(group_id, rule_id)) { - const static vector ALL_PROTOCOLS = [](){ - vector protocols; - for (uint16_t i = 0; i <= 255; i++) - { - protocols.push_back(static_cast(i)); - } - return protocols; - }(); - attrs.back().value.u8list.count = static_cast(ALL_PROTOCOLS.size()); - attrs.back().value.u8list.list = const_cast(ALL_PROTOCOLS.data()); + return m_group_mgr.updateRule(group_id, rule_id, rule); } else { - protocols.reserve(data.protocol_size()); - protocols.assign(data.protocol().begin(), data.protocol().end()); - attrs.back().value.u8list.count = static_cast(protocols.size()); - attrs.back().value.u8list.list = protocols.data(); + return m_group_mgr.createRule(group_id, rule_id, rule); } - - - const static sai_ip_prefix_t SAI_ALL_IPV4_PREFIX = [](){ - sai_ip_prefix_t ip_prefix; - ip_prefix.addr_family = SAI_IP_ADDR_FAMILY_IPV4; - ip_prefix.addr.ip4 = 0; - ip_prefix.mask.ip4 = 0; - return ip_prefix; - }(); - - const static sai_ip_prefix_t SAI_ALL_IPV6_PREFIX = [](){ - sai_ip_prefix_t ip_prefix; - ip_prefix.addr_family = SAI_IP_ADDR_FAMILY_IPV6; - memset(ip_prefix.addr.ip6, 0, sizeof(ip_prefix.addr.ip6)); - memset(ip_prefix.mask.ip6, 0, sizeof(ip_prefix.mask.ip6)); - return ip_prefix; - }(); - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SIP; - vector src_prefixes; - if (data.src_addr_size() == 0) - { - src_prefixes.push_back( - acl_group->m_ip_version == SAI_IP_ADDR_FAMILY_IPV4 ? 
- SAI_ALL_IPV4_PREFIX : - SAI_ALL_IPV6_PREFIX); - } - else if (!to_sai(data.src_addr(), src_prefixes)) - { - return task_invalid_entry; - } - attrs.back().value.ipprefixlist.count = static_cast(src_prefixes.size()); - attrs.back().value.ipprefixlist.list = src_prefixes.data(); - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DIP; - vector dst_prefixes; - if (data.src_addr_size() == 0) - { - dst_prefixes.push_back( - acl_group->m_ip_version == SAI_IP_ADDR_FAMILY_IPV4 ? - SAI_ALL_IPV4_PREFIX : - SAI_ALL_IPV6_PREFIX); - } - else if (!to_sai(data.src_addr(), dst_prefixes)) - { - return task_invalid_entry; - } - attrs.back().value.ipprefixlist.count = static_cast(dst_prefixes.size()); - attrs.back().value.ipprefixlist.list = dst_prefixes.data(); - - const static sai_u16_range_t SAI_ALL_PORTS{numeric_limits::min(), numeric_limits::max()}; - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_SRC_PORT; - vector src_ports; - if (data.src_port_size() == 0) - { - src_ports.push_back(SAI_ALL_PORTS); - } - else if (!to_sai(data.src_port(), src_ports)) - { - return task_invalid_entry; - } - attrs.back().value.u16rangelist.count = static_cast(src_ports.size()); - attrs.back().value.u16rangelist.list = src_ports.data(); - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DST_PORT; - vector dst_ports; - if (data.dst_port_size() == 0) - { - dst_ports.push_back(SAI_ALL_PORTS); - } - else if (!to_sai(data.dst_port(), dst_ports)) - { - return task_invalid_entry; - } - attrs.back().value.u16rangelist.count = static_cast(dst_ports.size()); - attrs.back().value.u16rangelist.list = dst_ports.data(); - - attrs.emplace_back(); - attrs.back().id = SAI_DASH_ACL_RULE_ATTR_DASH_ACL_GROUP_ID; - attrs.back().value.oid = acl_group->m_dash_acl_group_id; - - DashAclRuleEntry acl_rule; - sai_status_t status = sai_dash_acl_api->create_dash_acl_rule(&acl_rule.m_dash_acl_rule_id, gSwitchId, static_cast(attrs.size()), attrs.data()); - if (status != 
SAI_STATUS_SUCCESS) - { - SWSS_LOG_WARN("Failed to create dash ACL rule %s, rv: %s", key.c_str(), sai_serialize_status(status).c_str()); - return task_failed; - } - m_dash_acl_rule_table.emplace(key, acl_rule); - acl_group->m_rule_count++; - SWSS_LOG_NOTICE("Created ACL rule %s", key.c_str()); - - CrmResourceType crm_rtype = (acl_group->m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? - CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; - gCrmOrch->incCrmDashAclUsedCounter(crm_rtype, acl_group->m_dash_acl_group_id); - - return task_success; } task_process_status DashAclOrch::taskRemoveDashAclRule( @@ -505,197 +290,100 @@ task_process_status DashAclOrch::taskRemoveDashAclRule( string group_id, rule_id; if (!extractVariables(key, ':', group_id, rule_id)) { - SWSS_LOG_WARN("Failed to parse key %s", key.c_str()); + SWSS_LOG_ERROR("Failed to parse key %s", key.c_str()); return task_failed; } - auto &acl_group = m_dash_acl_group_table[group_id]; + return m_group_mgr.removeRule(group_id, rule_id); +} + +task_process_status DashAclOrch::taskUpdateDashPrefixTag( + const std::string &tag_id, + const PrefixTag &data) +{ + SWSS_LOG_ENTER(); - auto itr = m_dash_acl_rule_table.find(key); + DashTag tag = {}; - if (itr == m_dash_acl_rule_table.end()) + if (!from_pb(data, tag)) { - SWSS_LOG_WARN("ACL rule %s doesn't exist", key.c_str()); - return task_success; + return task_failed; } - auto &acl_rule = itr->second; - - bool is_existing = acl_rule.m_dash_acl_rule_id != SAI_NULL_OBJECT_ID; - - if (!is_existing) + if (m_tag_mgr.exists(tag_id)) { - SWSS_LOG_WARN("ACL rule %s doesn't exist", key.c_str()); - return task_success; + return m_tag_mgr.update(tag_id, tag); } - - // Remove the ACL group - sai_status_t status = sai_dash_acl_api->remove_dash_acl_rule(acl_rule.m_dash_acl_rule_id); - if (status != SAI_STATUS_SUCCESS) + else { - SWSS_LOG_WARN("Failed to remove dash ACL rule %s, rv: %s", key.c_str(), sai_serialize_status(status).c_str()); - return 
task_failed; + return m_tag_mgr.create(tag_id, tag); } - m_dash_acl_rule_table.erase(itr); - --acl_group.m_rule_count; - - CrmResourceType crm_resource = (acl_group.m_ip_version == SAI_IP_ADDR_FAMILY_IPV4) ? - CrmResourceType::CRM_DASH_IPV4_ACL_RULE : CrmResourceType::CRM_DASH_IPV6_ACL_RULE; - gCrmOrch->decCrmDashAclUsedCounter(crm_resource, acl_group.m_dash_acl_group_id); - - SWSS_LOG_NOTICE("Removed ACL rule %s", key.c_str()); - - return task_success; } -DashAclGroupEntry* DashAclOrch::getAclGroup(const string &group_id) +task_process_status DashAclOrch::taskRemoveDashPrefixTag( + const std::string &key) { SWSS_LOG_ENTER(); - auto itr = m_dash_acl_group_table.find(group_id); - - if (itr != m_dash_acl_group_table.end() && itr->second.m_dash_acl_group_id != SAI_NULL_OBJECT_ID) - { - return &itr->second; - } - else - { - return nullptr; - } + return m_tag_mgr.remove(key); } -task_process_status DashAclOrch::bindAclToEni(DashAclBindTable &acl_bind_table, const string &key, const string &acl_group_id) +task_process_status DashAclOrch::bindAclToEni(DashAclDirection direction, const std::string table_id, const std::string &acl_group_id) { SWSS_LOG_ENTER(); - assert(&acl_bind_table == &m_dash_acl_in_table || &acl_bind_table == &m_dash_acl_out_table); - DashAclDirection direction = ((&acl_bind_table == &m_dash_acl_in_table) ? 
DashAclDirection::IN : DashAclDirection::OUT); - string eni; - uint32_t stage; - if (!extractVariables(key, ':', eni, stage)) - { - SWSS_LOG_WARN("Invalid key : %s", key.c_str()); - return task_failed; - } + DashAclStage stage; - if (acl_group_id.empty()) + if (!extractVariables(table_id, ':', eni, stage)) { - SWSS_LOG_WARN("Empty group id in the key : %s", key.c_str()); + SWSS_LOG_ERROR("Invalid key : %s", table_id.c_str()); return task_failed; } - auto eni_entry = m_dash_orch->getEni(eni); - if (eni_entry == nullptr) - { - SWSS_LOG_INFO("eni %s cannot be found", eni.c_str()); - // The ENI may not be created yet, so we will wait for the ENI to be created - return task_need_retry; - } - - auto &acl_bind = acl_bind_table[key]; - - auto acl_group = getAclGroup(acl_group_id); - if (acl_group == nullptr) - { - SWSS_LOG_INFO("acl group %s cannot be found, wait for create", acl_group_id.c_str()); - return task_need_retry; - } - - if (acl_group->m_rule_count <= 0) - { - SWSS_LOG_INFO("acl group %s contains 0 rules, waiting for rule creation", acl_group_id.c_str()); - return task_need_retry; - } - - if (acl_bind.m_acl_group_id == acl_group_id) - { - SWSS_LOG_INFO("acl group %s is already bound to %s", acl_group_id.c_str(), key.c_str()); - return task_success; - } - else if (!acl_bind.m_acl_group_id.empty()) - { - auto old_acl_group = getAclGroup(acl_bind.m_acl_group_id); - if (old_acl_group != nullptr) - { - old_acl_group->m_ref_count--; - } - else - { - SWSS_LOG_WARN("Failed to find old acl group %s", acl_bind.m_acl_group_id.c_str()); - } - } - acl_bind.m_acl_group_id = acl_group_id; - - sai_attribute_t attr; - attr.id = getSaiStage(direction, acl_group->m_ip_version, stage); - attr.value.oid = acl_group->m_dash_acl_group_id; + DashAclEntry table = { .m_acl_group_id = acl_group_id }; - sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entry->eni_id, &attr); - if (status != SAI_STATUS_SUCCESS) + auto rv = m_group_mgr.bind(table.m_acl_group_id, eni, direction, 
stage); + if (rv != task_success) { - SWSS_LOG_WARN("Failed to bind ACL %s to eni %s attribute, status : %s", key.c_str(), acl_group_id.c_str(), sai_serialize_status(status).c_str()); - return task_failed; + return rv; } - acl_group->m_ref_count++; - SWSS_LOG_NOTICE("Bind ACL group %s to %s", acl_group_id.c_str(), key.c_str()); + DashAclTable& tables = (direction == DashAclDirection::IN) ? m_dash_acl_in_table : m_dash_acl_out_table; + tables[table_id] = table; - return task_success; + return rv; } -task_process_status DashAclOrch::unbindAclFromEni(DashAclBindTable &acl_bind_table, const string &key) +task_process_status DashAclOrch::unbindAclFromEni(DashAclDirection direction, const std::string table_id) { SWSS_LOG_ENTER(); - assert(&acl_bind_table == &m_dash_acl_in_table || &acl_bind_table == &m_dash_acl_out_table); - DashAclDirection direction = ((&acl_bind_table == &m_dash_acl_in_table) ? DashAclDirection::IN : DashAclDirection::OUT); - string eni; - uint32_t stage; - if (!extractVariables(key, ':', eni, stage)) + DashAclStage stage; + if (!extractVariables(table_id, ':', eni, stage)) { - SWSS_LOG_WARN("Invalid key : %s", key.c_str()); + SWSS_LOG_ERROR("Invalid key : %s", table_id.c_str()); return task_failed; } - auto eni_entry = m_dash_orch->getEni(eni); - if (eni_entry == nullptr) - { - SWSS_LOG_WARN("eni %s cannot be found", eni.c_str()); - return task_failed; - } + DashAclTable& acl_table = (direction == DashAclDirection::IN) ? 
m_dash_acl_in_table : m_dash_acl_out_table; - auto itr = acl_bind_table.find(key); - if (itr == acl_bind_table.end() || itr->second.m_acl_group_id.empty()) + auto itr = acl_table.find(table_id); + if (itr == acl_table.end()) { - SWSS_LOG_WARN("ACL %s doesn't exist", key.c_str()); + SWSS_LOG_WARN("ACL %s doesn't exist", table_id.c_str()); return task_success; } - auto acl_bind = itr->second; - acl_bind_table.erase(itr); - - auto acl_group = getAclGroup(acl_bind.m_acl_group_id); - if (acl_group == nullptr) - { - SWSS_LOG_WARN("Invalid acl group id : %s", acl_bind.m_acl_group_id.c_str()); - return task_failed; - } - - sai_attribute_t attr; - attr.id = getSaiStage(direction, acl_group->m_ip_version, stage); - attr.value.oid = SAI_NULL_OBJECT_ID; + auto acl = itr->second; - sai_status_t status = sai_dash_eni_api->set_eni_attribute(eni_entry->eni_id, &attr); - if (status != SAI_STATUS_SUCCESS) + auto rv = m_group_mgr.unbind(acl.m_acl_group_id, eni, direction, stage); + if (rv != task_success) { - SWSS_LOG_WARN("Failed to unbind ACL %s to eni %s attribute, status : %s", key.c_str(), acl_bind.m_acl_group_id.c_str(), sai_serialize_status(status).c_str()); - return task_failed; + return rv; } - acl_group->m_ref_count--; - - SWSS_LOG_NOTICE("Unbind ACL group %s from %s", acl_bind.m_acl_group_id.c_str(), key.c_str()); + acl_table.erase(itr); - return task_success; + return rv; } diff --git a/orchagent/dash/dashaclorch.h b/orchagent/dash/dashaclorch.h index b3d417e9cd0..b3859c5c2d8 100644 --- a/orchagent/dash/dashaclorch.h +++ b/orchagent/dash/dashaclorch.h @@ -17,35 +17,18 @@ #include "zmqserver.h" #include "dashorch.h" +#include "dashaclgroupmgr.h" +#include "dashtagmgr.h" #include "dash_api/acl_group.pb.h" #include "dash_api/acl_rule.pb.h" #include "dash_api/acl_in.pb.h" #include "dash_api/acl_out.pb.h" -typedef enum _DashAclDirection -{ - IN, - OUT, -} DashAclDirection; - -struct DashAclBindEntry { +struct DashAclEntry { std::string m_acl_group_id; }; -struct 
DashAclGroupEntry { - sai_object_id_t m_dash_acl_group_id; - size_t m_ref_count; - size_t m_rule_count; - sai_ip_addr_family_t m_ip_version; -}; - -struct DashAclRuleEntry { - sai_object_id_t m_dash_acl_rule_id; -}; - -using DashAclBindTable = std::unordered_map; -using DashAclGroupTable = std::unordered_map; -using DashAclRuleTable = std::unordered_map; +using DashAclTable = std::unordered_map; class DashAclOrch : public ZmqOrch { @@ -53,6 +36,8 @@ class DashAclOrch : public ZmqOrch using TaskArgs = std::vector; DashAclOrch(swss::DBConnector *db, const std::vector &tables, DashOrch *dash_orch, swss::ZmqServer *zmqServer); + DashAclGroupMgr& getDashAclGroupMgr(); + DashTagMgr& getDashAclTagMgr(); private: void doTask(ConsumerBase &consumer); @@ -81,20 +66,26 @@ class DashAclOrch : public ZmqOrch task_process_status taskRemoveDashAclRule( const std::string &key); - DashAclGroupEntry* getAclGroup(const std::string &group_id); + task_process_status taskUpdateDashPrefixTag( + const std::string &key, + const dash::tag::PrefixTag &data); + + task_process_status taskRemoveDashPrefixTag( + const std::string &key); task_process_status bindAclToEni( - DashAclBindTable &acl_table, - const std::string &key, + DashAclDirection direction, + const std::string table_id, const std::string &acl_group_id); task_process_status unbindAclFromEni( - DashAclBindTable &acl_table, - const std::string &key); + DashAclDirection direction, + const std::string table_id); + + DashAclTable m_dash_acl_in_table; + DashAclTable m_dash_acl_out_table; - DashAclBindTable m_dash_acl_in_table; - DashAclBindTable m_dash_acl_out_table; - DashAclGroupTable m_dash_acl_group_table; - DashAclRuleTable m_dash_acl_rule_table; + DashAclGroupMgr m_group_mgr; + DashTagMgr m_tag_mgr; DashOrch *m_dash_orch; }; diff --git a/orchagent/dash/dashtagmgr.cpp b/orchagent/dash/dashtagmgr.cpp new file mode 100644 index 00000000000..834442395b5 --- /dev/null +++ b/orchagent/dash/dashtagmgr.cpp @@ -0,0 +1,150 @@ +#include 
"dashtagmgr.h" + +#include "dashaclorch.h" +#include "saihelper.h" + +using namespace std; +using namespace swss; + +bool from_pb(const dash::tag::PrefixTag& data, DashTag& tag) +{ + if (!to_sai(data.ip_version(), tag.m_ip_version)) + { + return false; + } + + if(!to_sai(data.prefix_list(), tag.m_prefixes)) + { + return false; + } + + return true; +} + +DashTagMgr::DashTagMgr(DashAclOrch *aclorch) : + m_dash_acl_orch(aclorch) +{ + SWSS_LOG_ENTER(); +} + +task_process_status DashTagMgr::create(const string& tag_id, const DashTag& tag) +{ + SWSS_LOG_ENTER(); + + if (exists(tag_id)) + { + return task_failed; + } + + m_tag_table.emplace(tag_id, tag); + + SWSS_LOG_INFO("Created prefix tag %s", tag_id.c_str()); + + return task_success; +} + +task_process_status DashTagMgr::update(const string& tag_id, const DashTag& new_tag) +{ + SWSS_LOG_ENTER(); + + SWSS_LOG_INFO("Updating existing prefix tag %s", tag_id.c_str()); + + auto tag_it = m_tag_table.find(tag_id); + if (tag_it == m_tag_table.end()) + { + SWSS_LOG_ERROR("Prefix tag %s does not exist ", tag_id.c_str()); + return task_failed; + } + + auto& tag = tag_it->second; + + if (tag.m_ip_version != new_tag.m_ip_version) + { + SWSS_LOG_WARN("'ip_version' changing is not supported for tag %s", tag_id.c_str()); + return task_failed; + } + + // Update tag prefixes + tag.m_prefixes = new_tag.m_prefixes; + + for (auto& group_it: tag.m_group_refcnt) + { + const auto& group_id = group_it.first; + m_dash_acl_orch->getDashAclGroupMgr().onUpdate(group_id, tag_id, tag); + } + + return task_success; +} + +task_process_status DashTagMgr::remove(const string& tag_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + if (tag_it == m_tag_table.end()) + { + SWSS_LOG_WARN("Prefix tag %s does not exist ", tag_id.c_str()); + return task_success; + } + + if (!tag_it->second.m_group_refcnt.empty()) + { + SWSS_LOG_WARN("Prefix tag %s is still in use by ACL rule(s)", tag_id.c_str()); + return task_need_retry; + } + + 
m_tag_table.erase(tag_it); + + return task_success; +} + +bool DashTagMgr::exists(const string& tag_id) const +{ + SWSS_LOG_ENTER(); + + return m_tag_table.find(tag_id) != m_tag_table.end(); +} + +const vector& DashTagMgr::getPrefixes(const string& tag_id) const +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + + return tag_it->second.m_prefixes; +} + +task_process_status DashTagMgr::attach(const string& tag_id, const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + auto& tag = tag_it->second; + + ++tag.m_group_refcnt[group_id]; + + SWSS_LOG_NOTICE("Tag %s is used by ACL group %s refcnt: %u", tag_id.c_str(), group_id.c_str(), tag.m_group_refcnt[group_id]); + return task_success; +} + +task_process_status DashTagMgr::detach(const string& tag_id, const string& group_id) +{ + SWSS_LOG_ENTER(); + + auto tag_it = m_tag_table.find(tag_id); + ABORT_IF_NOT(tag_it != m_tag_table.end(), "Tag %s does not exist", tag_id.c_str()); + auto& tag = tag_it->second; + auto group_it = tag.m_group_refcnt.find(group_id); + ABORT_IF_NOT(group_it != tag.m_group_refcnt.end(), "Group %s is not attached to the tag %s", group_id.c_str(), tag_id.c_str()); + + --group_it->second; + if (!group_it->second) + { + tag.m_group_refcnt.erase(group_it); + SWSS_LOG_NOTICE("Tag %s is no longer used by ACL group %s", tag_id.c_str(), group_id.c_str()); + } + + return task_success; +} diff --git a/orchagent/dash/dashtagmgr.h b/orchagent/dash/dashtagmgr.h new file mode 100644 index 00000000000..4b69efdaa8e --- /dev/null +++ b/orchagent/dash/dashtagmgr.h @@ -0,0 +1,44 @@ +#pragma once + +#include +#include + +#include +#include +#include + +#include "dashorch.h" +#include "pbutils.h" + +#include "dash_api/prefix_tag.pb.h" + +struct DashTag { + sai_ip_addr_family_t m_ip_version; + 
std::vector m_prefixes; + std::unordered_map m_group_refcnt; +}; + +bool from_pb(const dash::tag::PrefixTag& data, DashTag& tag); + +class DashAclOrch; + +class DashTagMgr +{ +public: + + DashTagMgr(DashAclOrch *aclorch); + + task_process_status create(const std::string& tag_id, const DashTag& tag); + task_process_status update(const std::string& tag_id, const DashTag& tag); + task_process_status remove(const std::string& tag_id); + bool exists(const std::string& tag_id) const; + + const std::vector& getPrefixes(const std::string& tag_id) const; + + task_process_status attach(const std::string& tag_id, const std::string& group_id); + task_process_status detach(const std::string& tag_id, const std::string& group_id); + +private: + DashAclOrch *m_dash_acl_orch; + std::unordered_map m_tag_table; +}; diff --git a/orchagent/dash/pbutils.cpp b/orchagent/dash/pbutils.cpp index e030382531b..e8cd98f9e81 100644 --- a/orchagent/dash/pbutils.cpp +++ b/orchagent/dash/pbutils.cpp @@ -5,6 +5,23 @@ using namespace std; using namespace swss; using namespace google::protobuf; +bool to_sai(const dash::types::IpVersion &pb_version, sai_ip_addr_family_t &sai_ip_family) +{ + switch (pb_version) + { + case dash::types::IP_VERSION_IPV4: + sai_ip_family = SAI_IP_ADDR_FAMILY_IPV4; + break; + case dash::types::IP_VERSION_IPV6: + sai_ip_family = SAI_IP_ADDR_FAMILY_IPV6; + break; + default: + return false; + } + + return true; +} + bool to_sai(const dash::types::IpAddress &pb_address, sai_ip_address_t &sai_address) { SWSS_LOG_ENTER(); diff --git a/orchagent/dash/pbutils.h b/orchagent/dash/pbutils.h index daae486b4ae..080cac46666 100644 --- a/orchagent/dash/pbutils.h +++ b/orchagent/dash/pbutils.h @@ -10,6 +10,8 @@ #include "dash_api/types.pb.h" +bool to_sai(const dash::types::IpVersion &pb_version, sai_ip_addr_family_t &sai_ip_family); + bool to_sai(const dash::types::IpAddress &pb_address, sai_ip_address_t &sai_address); bool to_sai(const dash::types::IpPrefix &pb_prefix, sai_ip_prefix_t 
&sai_prefix); @@ -40,7 +42,7 @@ bool to_sai(const dash::types::ValueOrRange &pb_range, RangeType &sai_range) { if (pb_range.value() < std::numeric_limits::min() || pb_range.value() > std::numeric_limits::max()) { - SWSS_LOG_WARN("The value %s is invalid", pb_range.value()); + SWSS_LOG_WARN("The value %s is invalid", std::to_string(pb_range.value()).c_str()); return false; } sai_range.min = static_cast(pb_range.value()); diff --git a/orchagent/main.cpp b/orchagent/main.cpp index 0add517a05d..df702e7c3f3 100644 --- a/orchagent/main.cpp +++ b/orchagent/main.cpp @@ -505,6 +505,10 @@ int main(int argc, char **argv) attr.value.ptr = (void *)on_switch_shutdown_request; attrs.push_back(attr); + attr.id = SAI_SWITCH_ATTR_PORT_HOST_TX_READY_NOTIFY; + attr.value.ptr = (void *)on_port_host_tx_ready; + attrs.push_back(attr); + if (gMySwitchType != "fabric" && gMacAddress) { attr.id = SAI_SWITCH_ATTR_SRC_MAC_ADDRESS; diff --git a/orchagent/muxorch.cpp b/orchagent/muxorch.cpp index 0a6af3a541d..72b1a32fa60 100644 --- a/orchagent/muxorch.cpp +++ b/orchagent/muxorch.cpp @@ -669,6 +669,7 @@ void MuxCable::updateNeighbor(NextHopKey nh, bool add) */ void MuxCable::updateRoutes() { + SWSS_LOG_INFO("Updating routes pointing to multiple mux nexthops"); MuxNeighbor neighbors = nbr_handler_->getNeighbors(); string alias = nbr_handler_->getAlias(); for (auto nh = neighbors.begin(); nh != neighbors.end(); nh ++) @@ -1098,6 +1099,8 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) if (!add) { + SWSS_LOG_INFO("Removing route %s from mux_multi_active_nh_table", + pfx.to_string().c_str()); mux_multi_active_nh_table.erase(pfx); return; } @@ -1110,6 +1113,7 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) */ if (nhg_key.getSize() <= 1) { + SWSS_LOG_INFO("Route points to single nexthop, ignoring"); return; } @@ -1121,24 +1125,6 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) /* get nexthops from nexthop group */ nextHops = nhg_key.getNextHops(); - auto it = 
mux_multi_active_nh_table.find(pfx); - if (it != mux_multi_active_nh_table.end()) - { - /* This will only work for configured MUX neighbors (most cases) - * TODO: add way to find MUX from neighbor - */ - MuxCable* cable = findMuxCableInSubnet(it->second.ip_address); - auto standalone = standalone_tunnel_neighbors_.find(it->second.ip_address); - - if ((cable == nullptr && standalone == standalone_tunnel_neighbors_.end()) || - cable->isActive()) - { - SWSS_LOG_INFO("Route %s pointing to active neighbor %s", - pfx.to_string().c_str(), it->second.to_string().c_str()); - return; - } - } - SWSS_LOG_NOTICE("Updating route %s pointing to Mux nexthops %s", pfx.to_string().c_str(), nhg_key.to_string().c_str()); @@ -1167,7 +1153,7 @@ void MuxOrch::updateRoute(const IpPrefix &pfx, bool add) pfx.to_string().c_str(), nexthop.to_string().c_str()); continue; } - SWSS_LOG_INFO("setting route %s with nexthop %s %" PRIx64 "", + SWSS_LOG_NOTICE("setting route %s with nexthop %s %" PRIx64 "", pfx.to_string().c_str(), nexthop.to_string().c_str(), next_hop_id); mux_multi_active_nh_table[pfx] = nexthop; active_found = true; @@ -1442,9 +1428,11 @@ bool MuxOrch::isMuxNexthops(const NextHopGroupKey& nextHops) { if (this->containsNextHop(*it)) { + SWSS_LOG_INFO("Found mux nexthop %s", it->to_string().c_str()); return true; } } + SWSS_LOG_INFO("No mux nexthop found"); return false; } diff --git a/orchagent/notifications.cpp b/orchagent/notifications.cpp index 297b8d48506..5fbae21c288 100644 --- a/orchagent/notifications.cpp +++ b/orchagent/notifications.cpp @@ -5,6 +5,10 @@ extern "C" { #include "logger.h" #include "notifications.h" +#ifdef ASAN_ENABLED +#include +#endif + void on_fdb_event(uint32_t count, sai_fdb_event_notification_data_t *data) { // don't use this event handler, because it runs by libsairedis in a separate thread @@ -30,5 +34,22 @@ void on_switch_shutdown_request(sai_object_id_t switch_id) /* TODO: Later a better restart story will be told here */ SWSS_LOG_ERROR("Syncd 
stopped"); - exit(EXIT_FAILURE); + /* + The quick_exit() is used instead of the exit() to avoid a following data race: + * the exit() calls the destructors for global static variables (e.g.BufferOrch::m_buffer_type_maps) + * in parallel to that, orchagent accesses the global static variables + Since quick_exit doesn't call atexit() flows, the LSAN check is called explicitly via __lsan_do_leak_check() + */ + +#ifdef ASAN_ENABLED + __lsan_do_leak_check(); +#endif + + quick_exit(EXIT_FAILURE); } + +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus) +{ + // don't use this event handler, because it runs by libsairedis in a separate thread + // which causes concurrency access to the DB +} \ No newline at end of file diff --git a/orchagent/notifications.h b/orchagent/notifications.h index 61e8422db02..81d49efee0e 100644 --- a/orchagent/notifications.h +++ b/orchagent/notifications.h @@ -11,3 +11,4 @@ void on_bfd_session_state_change(uint32_t count, sai_bfd_session_state_notificat // The function prototype information can be found here: // https://github.com/sonic-net/sonic-sairedis/blob/master/meta/NotificationSwitchShutdownRequest.cpp#L49 void on_switch_shutdown_request(sai_object_id_t switch_id); +void on_port_host_tx_ready(sai_object_id_t switch_id, sai_object_id_t port_id, sai_port_host_tx_ready_status_t m_portHostTxReadyStatus); diff --git a/orchagent/orch.cpp b/orchagent/orch.cpp index 869c404780b..1e33d7c5ebf 100644 --- a/orchagent/orch.cpp +++ b/orchagent/orch.cpp @@ -156,7 +156,7 @@ size_t ConsumerBase::addToSync(const std::deque &entries } // TODO: Table should be const -size_t Consumer::refillToSync(Table* table) +size_t ConsumerBase::refillToSync(Table* table) { std::deque entries; vector keys; @@ -178,7 +178,7 @@ size_t Consumer::refillToSync(Table* table) return addToSync(entries); } -size_t Consumer::refillToSync() +size_t ConsumerBase::refillToSync() { 
auto subTable = dynamic_cast(getSelectable()); if (subTable != NULL) @@ -194,14 +194,23 @@ size_t Consumer::refillToSync() } while (update_size != 0); return total_size; } - else + string tableName = getTableName(); + auto consumerTable = dynamic_cast(getSelectable()); + if (consumerTable != NULL) { // consumerTable is either ConsumerStateTable or ConsumerTable - auto db = getDbConnector(); - string tableName = getTableName(); + auto db = consumerTable->getDbConnector(); + auto table = Table(db, tableName); + return refillToSync(&table); + } + auto zmqTable = dynamic_cast(getSelectable()); + if (zmqTable != NULL) + { + auto db = zmqTable->getDbConnector(); auto table = Table(db, tableName); return refillToSync(&table); } + return 0; } string ConsumerBase::dumpTuple(const KeyOpFieldsValuesTuple &tuple) @@ -253,7 +262,7 @@ void Consumer::drain() size_t Orch::addExistingData(const string& tableName) { - auto consumer = dynamic_cast(getExecutor(tableName)); + auto consumer = dynamic_cast(getExecutor(tableName)); if (consumer == NULL) { SWSS_LOG_ERROR("No consumer %s in Orch", tableName.c_str()); @@ -267,7 +276,7 @@ size_t Orch::addExistingData(const string& tableName) size_t Orch::addExistingData(Table *table) { string tableName = table->getTableName(); - Consumer* consumer = dynamic_cast(getExecutor(tableName)); + ConsumerBase* consumer = dynamic_cast(getExecutor(tableName)); if (consumer == NULL) { SWSS_LOG_ERROR("No consumer %s in Orch", tableName.c_str()); @@ -285,7 +294,7 @@ bool Orch::bake() { string executorName = it.first; auto executor = it.second; - auto consumer = dynamic_cast(executor.get()); + auto consumer = dynamic_cast(executor.get()); if (consumer == NULL) { continue; @@ -537,7 +546,7 @@ void Orch::dumpPendingTasks(vector &ts) { for (auto &it : m_consumerMap) { - Consumer* consumer = dynamic_cast(it.second.get()); + ConsumerBase* consumer = dynamic_cast(it.second.get()); if (consumer == NULL) { SWSS_LOG_DEBUG("Executor is not a Consumer"); diff --git 
a/orchagent/orch.h b/orchagent/orch.h index b813d133edc..6e4702ce3de 100644 --- a/orchagent/orch.h +++ b/orchagent/orch.h @@ -163,6 +163,9 @@ class ConsumerBase : public Executor { // Returns: the number of entries added to m_toSync size_t addToSync(const std::deque &entries); + + size_t refillToSync(); + size_t refillToSync(swss::Table* table); }; class Consumer : public ConsumerBase { @@ -194,8 +197,6 @@ class Consumer : public ConsumerBase { return getDbConnector()->getDbName(); } - size_t refillToSync(); - size_t refillToSync(swss::Table* table); void execute() override; void drain() override; }; diff --git a/orchagent/orchdaemon.cpp b/orchagent/orchdaemon.cpp index 6787fdf8461..ad75596b4c1 100644 --- a/orchagent/orchdaemon.cpp +++ b/orchagent/orchdaemon.cpp @@ -128,6 +128,7 @@ bool OrchDaemon::init() vector ports_tables = { { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_SEND_TO_INGRESS_PORT_TABLE_NAME, portsorch_base_pri + 5 }, { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, @@ -257,6 +258,7 @@ bool OrchDaemon::init() gDirectory.set(dash_route_orch); vector dash_acl_tables = { + APP_DASH_PREFIX_TAG_TABLE_NAME, APP_DASH_ACL_IN_TABLE_NAME, APP_DASH_ACL_OUT_TABLE_NAME, APP_DASH_ACL_GROUP_TABLE_NAME, @@ -863,7 +865,7 @@ void OrchDaemon::start() flush(); SWSS_LOG_WARN("Orchagent is frozen for warm restart!"); - sleep(UINT_MAX); + freezeAndHeartBeat(UINT_MAX); } } } @@ -1032,6 +1034,19 @@ void OrchDaemon::heartBeat(std::chrono::time_point 0) + { + // Send heartbeat message to prevent Orchagent stuck alert. 
+ auto tend = std::chrono::high_resolution_clock::now(); + heartBeat(tend); + + duration--; + sleep(1); + } +} + FabricOrchDaemon::FabricOrchDaemon(DBConnector *applDb, DBConnector *configDb, DBConnector *stateDb, DBConnector *chassisAppDb, ZmqServer *zmqServer) : OrchDaemon(applDb, configDb, stateDb, chassisAppDb, zmqServer), m_applDb(applDb), diff --git a/orchagent/orchdaemon.h b/orchagent/orchdaemon.h index 27c9a69c7db..803a720c3cb 100644 --- a/orchagent/orchdaemon.h +++ b/orchagent/orchdaemon.h @@ -101,6 +101,8 @@ class OrchDaemon void flush(); void heartBeat(std::chrono::time_point tcurrent); + + void freezeAndHeartBeat(unsigned int duration); }; class FabricOrchDaemon : public OrchDaemon diff --git a/orchagent/p4orch/tests/Makefile.am b/orchagent/p4orch/tests/Makefile.am index 671aa9eb800..d541bbe6372 100644 --- a/orchagent/p4orch/tests/Makefile.am +++ b/orchagent/p4orch/tests/Makefile.am @@ -33,6 +33,8 @@ p4orch_tests_SOURCES = $(ORCHAGENT_DIR)/orch.cpp \ $(top_srcdir)/lib/recorder.cpp \ $(ORCHAGENT_DIR)/flex_counter/flex_counter_manager.cpp \ $(ORCHAGENT_DIR)/flex_counter/flow_counter_handler.cpp \ + $(ORCHAGENT_DIR)/port/port_capabilities.cpp \ + $(ORCHAGENT_DIR)/port/porthlpr.cpp \ $(P4ORCH_DIR)/p4oidmapper.cpp \ $(P4ORCH_DIR)/p4orch.cpp \ $(P4ORCH_DIR)/p4orch_util.cpp \ diff --git a/orchagent/pfc_detect_mellanox.lua b/orchagent/pfc_detect_mellanox.lua index e805ad9cff1..826a577d623 100644 --- a/orchagent/pfc_detect_mellanox.lua +++ b/orchagent/pfc_detect_mellanox.lua @@ -12,6 +12,17 @@ local rets = {} redis.call('SELECT', counters_db) +-- Record the polling time +local timestamp_last = redis.call('HGET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last') +local timestamp_struct = redis.call('TIME') +local timestamp_current = timestamp_struct[1] + timestamp_struct[2] / 1000000 +local timestamp_string = tostring(timestamp_current) +redis.call('HSET', 'TIMESTAMP', 'pfcwd_poll_timestamp_last', timestamp_string) +local real_poll_time = poll_time +if timestamp_last ~= 
false then + real_poll_time = (timestamp_current - tonumber(timestamp_last)) * 1000000 +end + -- Iterate through each queue local n = table.getn(KEYS) for i = n, 1, -1 do @@ -78,7 +89,12 @@ for i = n, 1, -1 do if time_left <= poll_time then redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_rx_pkt_key .. '_last') redis.call('HDEL', counters_table_name .. ':' .. port_id, pfc_duration_key .. '_last') - redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm"]') + local occupancy_string = '"occupancy","' .. tostring(occupancy_bytes) .. '",' + local packets_string = '"packets","' .. tostring(packets) .. '","packets_last","' .. tostring(packets_last) .. '",' + local pfc_rx_packets_string = '"pfc_rx_packets","' .. tostring(pfc_rx_packets) .. '","pfc_rx_packets_last","' .. tostring(pfc_rx_packets_last) .. '",' + local storm_condition_string = '"pfc_duration","' .. tostring(pfc_duration) .. '","pfc_duration_last","' .. tostring(pfc_duration_last) .. '",' + local timestamps = '"timestamp","' .. timestamp_string .. '","timestamp_last","' .. timestamp_last .. '","real_poll_time","' .. real_poll_time .. '"' + redis.call('PUBLISH', 'PFC_WD_ACTION', '["' .. KEYS[i] .. '","storm",' .. occupancy_string .. packets_string .. pfc_rx_packets_string .. storm_condition_string .. timestamps .. 
']') is_deadlock = true time_left = detection_time else diff --git a/orchagent/pfcwdorch.cpp b/orchagent/pfcwdorch.cpp index 1d662e97f41..7c78f81d6ba 100644 --- a/orchagent/pfcwdorch.cpp +++ b/orchagent/pfcwdorch.cpp @@ -913,10 +913,20 @@ void PfcWdSwOrch::doTask(swss::NotificationConsumer wdNotification.pop(queueIdStr, event, values); + string info; + for (auto &fv : values) + { + info += fvField(fv) + ":" + fvValue(fv) + "|"; + } + if (!info.empty()) + { + info.pop_back(); + } + sai_object_id_t queueId = SAI_NULL_OBJECT_ID; sai_deserialize_object_id(queueIdStr, queueId); - if (!startWdActionOnQueue(event, queueId)) + if (!startWdActionOnQueue(event, queueId, info)) { SWSS_LOG_ERROR("Failed to start PFC watchdog %s event action on queue %s", event.c_str(), queueIdStr.c_str()); } @@ -939,7 +949,7 @@ void PfcWdSwOrch::doTask(SelectableTimer &timer) template void PfcWdSwOrch::report_pfc_storm( - sai_object_id_t id, const PfcWdQueueEntry *entry) + sai_object_id_t id, const PfcWdQueueEntry *entry, const string &info) { event_params_t params = { { "ifname", entry->portAlias }, @@ -947,18 +957,32 @@ void PfcWdSwOrch::report_pfc_storm( { "queue_id", to_string(id) }, { "port_id", to_string(entry->portId) }}; - SWSS_LOG_NOTICE( - "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ".", + if (info.empty()) + { + SWSS_LOG_NOTICE( + "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64, entry->portAlias.c_str(), entry->index, id, entry->portId); + } + else + { + SWSS_LOG_NOTICE( + "PFC Watchdog detected PFC storm on port %s, queue index %d, queue id 0x%" PRIx64 " and port id 0x%" PRIx64 ", additional info: %s.", + entry->portAlias.c_str(), + entry->index, + id, + entry->portId, + info.c_str()); + params["additional_info"] = info; + } event_publish(g_events_handle, "pfc-storm", ¶ms); } template -bool PfcWdSwOrch::startWdActionOnQueue(const string &event, sai_object_id_t 
queueId) +bool PfcWdSwOrch::startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info) { auto entry = m_entryMap.find(queueId); if (entry == m_entryMap.end()) @@ -979,7 +1003,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - report_pfc_storm(entry->first, &entry->second); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, @@ -996,7 +1020,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - report_pfc_storm(entry->first, &entry->second); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, @@ -1013,7 +1037,7 @@ bool PfcWdSwOrch::startWdActionOnQueue(const string { if (entry->second.handler == nullptr) { - report_pfc_storm(entry->first, &entry->second); + report_pfc_storm(entry->first, &entry->second, info); entry->second.handler = make_shared( entry->second.portId, diff --git a/orchagent/pfcwdorch.h b/orchagent/pfcwdorch.h index 8c30d7068e5..935582289c9 100644 --- a/orchagent/pfcwdorch.h +++ b/orchagent/pfcwdorch.h @@ -60,7 +60,7 @@ class PfcWdOrch: public Orch void setPfcDlrPacketAction(PfcWdAction action) { PfcDlrPacketAction = action; } protected: - virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) = 0; + virtual bool startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info="") = 0; string m_platform = ""; private: @@ -96,7 +96,7 @@ class PfcWdSwOrch: public PfcWdOrch void doTask() override; protected: - bool startWdActionOnQueue(const string &event, sai_object_id_t queueId) override; + bool startWdActionOnQueue(const string &event, sai_object_id_t queueId, const string &info="") override; private: struct PfcWdQueueEntry @@ -128,7 +128,7 @@ class PfcWdSwOrch: public PfcWdOrch void enableBigRedSwitchMode(); void setBigRedSwitchMode(string value); - void 
report_pfc_storm(sai_object_id_t id, const PfcWdQueueEntry *); + void report_pfc_storm(sai_object_id_t id, const PfcWdQueueEntry *, const string&); map m_entryMap; map m_brsEntryMap; diff --git a/orchagent/port/port_capabilities.cpp b/orchagent/port/port_capabilities.cpp new file mode 100644 index 00000000000..a55334d9f1a --- /dev/null +++ b/orchagent/port/port_capabilities.cpp @@ -0,0 +1,78 @@ +// includes ----------------------------------------------------------------------------------------------------------- + +extern "C" { +#include +} + +#include + +#include +#include + +#include "port_capabilities.h" + +using namespace swss; + +// variables ---------------------------------------------------------------------------------------------------------- + +extern sai_object_id_t gSwitchId; + +// functions ---------------------------------------------------------------------------------------------------------- + +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) noexcept +{ + const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); + + return meta != nullptr ? 
meta->attridname : "UNKNOWN"; +} + +// Port capabilities -------------------------------------------------------------------------------------------------- + +PortCapabilities::PortCapabilities() +{ + queryPortAttrCapabilities(portCapabilities.pfc, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL); + queryPortAttrCapabilities(portCapabilities.pfcTx, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_TX); + queryPortAttrCapabilities(portCapabilities.pfcRx, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX); + queryPortAttrCapabilities(portCapabilities.pfcMode, SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE); +} + +bool PortCapabilities::isPortPfcAsymSupported() const +{ + auto supported = portCapabilities.pfcMode.attrCap.set_implemented; + supported = supported && portCapabilities.pfc.attrCap.set_implemented; + supported = supported && portCapabilities.pfcTx.attrCap.set_implemented; + supported = supported && portCapabilities.pfcRx.attrCap.set_implemented; + + return supported; +} + +sai_status_t PortCapabilities::queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const +{ + return sai_query_attribute_capability(gSwitchId, objType, attrId, &attrCap); +} + +template +void PortCapabilities::queryPortAttrCapabilities(T &obj, sai_port_attr_t attrId) +{ + SWSS_LOG_ENTER(); + + auto status = queryAttrCapabilitiesSai( + obj.attrCap, SAI_OBJECT_TYPE_PORT, attrId + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_PORT, attrId).c_str() + ); + return; + } + + if (!obj.attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_PORT, attrId).c_str() + ); + } +} diff --git a/orchagent/port/port_capabilities.h b/orchagent/port/port_capabilities.h new file mode 100644 index 00000000000..e937e7b943c --- /dev/null +++ b/orchagent/port/port_capabilities.h @@ -0,0 +1,41 @@ +#pragma once + +extern "C" { +#include +#include +#include +} + 
+class PortCapabilities final +{ +public: + PortCapabilities(); + ~PortCapabilities() = default; + + bool isPortPfcAsymSupported() const; + +private: + sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; + + template + void queryPortAttrCapabilities(T &obj, sai_port_attr_t attrId); + + // Port SAI capabilities + struct { + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcRx; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_RX + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcTx; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_TX + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfc; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL + + struct { + sai_attr_capability_t attrCap = { false, false, false }; + } pfcMode; // SAI_PORT_ATTR_PRIORITY_FLOW_CONTROL_MODE + } portCapabilities; +}; diff --git a/orchagent/port/portcnt.h b/orchagent/port/portcnt.h index 2c276370a83..26c8a603f64 100644 --- a/orchagent/port/portcnt.h +++ b/orchagent/port/portcnt.h @@ -155,6 +155,36 @@ class PortConfig final bool is_set = false; } attn; // Port serdes attn + struct { + std::vector value; + bool is_set = false; + } ob_m2lp; // Port serdes ob_m2lp + + struct { + std::vector value; + bool is_set = false; + } ob_alev_out; // Port serdes ob_alev_out + + struct { + std::vector value; + bool is_set = false; + } obplev; // Port serdes obplev + + struct { + std::vector value; + bool is_set = false; + } obnlev; // Port serdes obnlev + + struct { + std::vector value; + bool is_set = false; + } regn_bfm1p; // Port serdes regn_bfm1p + + struct { + std::vector value; + bool is_set = false; + } regn_bfm1n; // Port serdes regn_bfm1n + } serdes; // Port serdes struct { diff --git a/orchagent/port/porthlpr.cpp b/orchagent/port/porthlpr.cpp index c64e6fac4aa..64c05b2aec0 100644 --- a/orchagent/port/porthlpr.cpp +++ b/orchagent/port/porthlpr.cpp @@ -683,6 +683,14 @@ template bool 
PortHelper::parsePortSerdes(decltype(PortSerdes_t::post1) &serdes, template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post2) &serdes, const std::string &field, const std::string &value) const; template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::post3) &serdes, const std::string &field, const std::string &value) const; template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::attn) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ob_m2lp) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::ob_alev_out) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obplev) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::obnlev) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1p) &serdes, const std::string &field, const std::string &value) const; +template bool PortHelper::parsePortSerdes(decltype(PortSerdes_t::regn_bfm1n) &serdes, const std::string &field, const std::string &value) const; + + bool PortHelper::parsePortRole(PortConfig &port, const std::string &field, const std::string &value) const { @@ -924,6 +932,48 @@ bool PortHelper::parsePortConfig(PortConfig &port) const return false; } } + else if (field == PORT_OB_M2LP) + { + if (!this->parsePortSerdes(port.serdes.ob_m2lp, field, value)) + { + return false; + } + } + else if (field == PORT_OB_ALEV_OUT) + { + if (!this->parsePortSerdes(port.serdes.ob_alev_out, field, value)) + { + return false; + } + } + else if (field == PORT_OBPLEV) + { + if (!this->parsePortSerdes(port.serdes.obplev, field, value)) + { + return false; + } + } + else if (field == PORT_OBNLEV) + { + 
if (!this->parsePortSerdes(port.serdes.obnlev, field, value)) + { + return false; + } + } + else if (field == PORT_REGN_BFM1P) + { + if (!this->parsePortSerdes(port.serdes.regn_bfm1p, field, value)) + { + return false; + } + } + else if (field == PORT_REGN_BFM1N) + { + if (!this->parsePortSerdes(port.serdes.regn_bfm1n, field, value)) + { + return false; + } + } else if (field == PORT_ROLE) { if (!this->parsePortRole(port, field, value)) diff --git a/orchagent/port/portschema.h b/orchagent/port/portschema.h index 4aabf39fbc0..56b2541c375 100644 --- a/orchagent/port/portschema.h +++ b/orchagent/port/portschema.h @@ -77,6 +77,12 @@ #define PORT_POST2 "post2" #define PORT_POST3 "post3" #define PORT_ATTN "attn" +#define PORT_OB_M2LP "ob_m2lp" +#define PORT_OB_ALEV_OUT "ob_alev_out" +#define PORT_OBPLEV "obplev" +#define PORT_OBNLEV "obnlev" +#define PORT_REGN_BFM1P "regn_bfm1p" +#define PORT_REGN_BFM1N "regn_bfm1n" #define PORT_ROLE "role" #define PORT_ADMIN_STATUS "admin_status" #define PORT_DESCRIPTION "description" diff --git a/orchagent/portsorch.cpp b/orchagent/portsorch.cpp index 951d3ff8641..6a14f6a0f22 100755 --- a/orchagent/portsorch.cpp +++ b/orchagent/portsorch.cpp @@ -339,6 +339,39 @@ static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) { map[SAI_PORT_SERDES_ATTR_TX_FIR_ATTN] = port.serdes.attn.value; } + + if (port.serdes.ob_m2lp.is_set) + { + + map[SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO] = port.serdes.ob_m2lp.value; + } + + if (port.serdes.ob_alev_out.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE] = port.serdes.ob_alev_out.value; + } + + if (port.serdes.obplev.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE] = port.serdes.obplev.value; + } + + if (port.serdes.obnlev.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE] = port.serdes.obnlev.value; + } + + if (port.serdes.regn_bfm1p.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG] = port.serdes.regn_bfm1p.value; + } + + if 
(port.serdes.regn_bfm1n.is_set) + { + map[SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG] = port.serdes.regn_bfm1n.value; + } + + } // Port OA ------------------------------------------------------------------------------------------------------------ @@ -358,6 +391,8 @@ static void getPortSerdesAttr(PortSerdesAttrMap_t &map, const PortConfig &port) * bridge. By design, SONiC switch starts with all bridge ports removed from * default VLAN and all ports removed from .1Q bridge. */ + + PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &tableNames, DBConnector *chassisAppDb) : Orch(db, tableNames), m_portStateTable(stateDb, STATE_PORT_TABLE_NAME), @@ -384,6 +419,7 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector(new Table(db, APP_PORT_TABLE_NAME)); + m_sendToIngressPortTable = unique_ptr(new Table(db, APP_SEND_TO_INGRESS_PORT_TABLE_NAME)); /* Initialize gearbox */ m_gearboxTable = unique_ptr
(new Table(db, "_GEARBOX_TABLE")); @@ -498,6 +534,42 @@ PortsOrch::PortsOrch(DBConnector *db, DBConnector *stateDb, vector &portList) attrList.push_back(attr); } + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrList.push_back(attr); + } + attrDataList.push_back(attrList); attrCountList.push_back(static_cast(attrDataList.back().size())); attrPtrList.push_back(attrDataList.back().data()); @@ -1366,8 +1452,11 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) attr.id = SAI_PORT_ATTR_ADMIN_STATE; attr.value.booldata = state; + // if sync between cmis module configuration and asic is supported, + // do not change host_tx_ready value in STATE DB when admin status is changed. + /* Update the host_tx_ready to false before setting admin_state, when admin state is false */ - if (!state) + if (!state && !m_cmisModuleAsicSyncSupported) { m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); SWSS_LOG_NOTICE("Set admin status DOWN host_tx_ready to false for port %s", @@ -1380,7 +1469,11 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) SWSS_LOG_ERROR("Failed to set admin status %s for port %s." " Setting host_tx_ready as false", state ? 
"UP" : "DOWN", port.m_alias.c_str()); - m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + + if (!m_cmisModuleAsicSyncSupported) + { + m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); + } task_process_status handle_status = handleSaiSetStatus(SAI_API_PORT, status); if (handle_status != task_success) { @@ -1389,7 +1482,7 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } bool gbstatus = setGearboxPortsAttr(port, SAI_PORT_ATTR_ADMIN_STATE, &state); - if (gbstatus != true) + if (gbstatus != true && !m_cmisModuleAsicSyncSupported) { m_portStateTable.hset(port.m_alias, "host_tx_ready", "false"); SWSS_LOG_NOTICE("Set host_tx_ready to false as gbstatus is false " @@ -1397,7 +1490,7 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) } /* Update the state table for host_tx_ready*/ - if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) ) + if (state && (gbstatus == true) && (status == SAI_STATUS_SUCCESS) && !m_cmisModuleAsicSyncSupported) { m_portStateTable.hset(port.m_alias, "host_tx_ready", "true"); SWSS_LOG_NOTICE("Set admin status UP host_tx_ready to true for port %s", @@ -1407,6 +1500,20 @@ bool PortsOrch::setPortAdminStatus(Port &port, bool state) return true; } +void PortsOrch::setHostTxReady(sai_object_id_t portId, const std::string &status) +{ + Port p; + + if (!getPort(portId, p)) + { + SWSS_LOG_ERROR("Failed to get port object for port id 0x%" PRIx64, portId); + return; + } + + SWSS_LOG_NOTICE("Setting host_tx_ready status = %s, port_id = 0x%" PRIx64, status.c_str(), portId); + m_portStateTable.hset(p.m_alias, "host_tx_ready", status); +} + bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) { SWSS_LOG_ENTER(); @@ -1432,6 +1539,25 @@ bool PortsOrch::getPortAdminStatus(sai_object_id_t id, bool &up) return true; } +bool PortsOrch::getPortHostTxReady(const Port& port, bool &hostTxReadyVal) +{ + SWSS_LOG_ENTER(); + + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_READY_STATUS; + + 
sai_status_t status = sai_port_api->get_port_attribute(port.m_port_id, 1, &attr); + if (status != SAI_STATUS_SUCCESS) + { + hostTxReadyVal = false; + return false; + } + + hostTxReadyVal = attr.value.s32; + + return true; +} + bool PortsOrch::getPortMtu(const Port& port, sai_uint32_t &mtu) { SWSS_LOG_ENTER(); @@ -3250,6 +3376,68 @@ void PortsOrch::removePortFromPortListMap(sai_object_id_t port_id) } } +void PortsOrch::doSendToIngressPortTask(Consumer &consumer) +{ + SWSS_LOG_ENTER(); + + auto it = consumer.m_toSync.begin(); + while (it != consumer.m_toSync.end()) + { + auto &t = it->second; + + string alias = kfvKey(t); + string op = kfvOp(t); + ReturnCode rc; + std::vector app_state_db_attrs; + + if (op == SET_COMMAND) + { + if (m_isSendToIngressPortConfigured) + { + rc = ReturnCode(StatusCode::SWSS_RC_UNIMPLEMENTED) + << "Update operation on SendToIngress port with alias=" + << alias << " is not suported"; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + m_publisher.publish(consumer.getTableName(), kfvKey(t), + kfvFieldsValues(t), rc); + it = consumer.m_toSync.erase(it); + continue; + } + rc = addSendToIngressHostIf(alias); + if (!rc.ok()) + { + SWSS_LOG_ERROR("%s", rc.message().c_str()); + } + else + { + m_isSendToIngressPortConfigured = true; + } + } + else if (op == DEL_COMMAND) + { + // For SendToIngress port, delete the host interface and unbind from the CPU port + rc = removeSendToIngressHostIf(); + if (!rc.ok()) + { + SWSS_LOG_ERROR("Failed to remove SendToIngress port rc=%s", + rc.message().c_str()); + } + else + { + m_isSendToIngressPortConfigured = false; + } + } + else + { + rc = ReturnCode(StatusCode::SWSS_RC_INVALID_PARAM) << + "Unknown operation type " << op; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + } + m_publisher.publish(consumer.getTableName(), kfvKey(t), + kfvFieldsValues(t), rc); + it = consumer.m_toSync.erase(it); + } +} void PortsOrch::doPortTask(Consumer &consumer) { @@ -3953,24 +4141,34 @@ void PortsOrch::doPortTask(Consumer 
&consumer) { if (!p.m_pfc_asym_cfg || p.m_pfc_asym != pCfg.pfc_asym.value) { - if (!setPortPfcAsym(p, pCfg.pfc_asym.value)) + if (m_portCap.isPortPfcAsymSupported()) { - SWSS_LOG_ERROR( - "Failed to set port %s asymmetric PFC to %s", + if (!setPortPfcAsym(p, pCfg.pfc_asym.value)) + { + SWSS_LOG_ERROR( + "Failed to set port %s asymmetric PFC to %s", + p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() + ); + it++; + continue; + } + + p.m_pfc_asym = pCfg.pfc_asym.value; + p.m_pfc_asym_cfg = true; + m_portList[p.m_alias] = p; + + SWSS_LOG_NOTICE( + "Set port %s asymmetric PFC to %s", p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() ); - it++; - continue; } - - p.m_pfc_asym = pCfg.pfc_asym.value; - p.m_pfc_asym_cfg = true; - m_portList[p.m_alias] = p; - - SWSS_LOG_NOTICE( - "Set port %s asymmetric PFC to %s", - p.m_alias.c_str(), m_portHlpr.getPfcAsymStr(pCfg).c_str() - ); + else + { + SWSS_LOG_WARN( + "Port %s asymmetric PFC configuration is not supported: skipping ...", + p.m_alias.c_str() + ); + } } } @@ -4363,6 +4561,69 @@ void PortsOrch::doVlanMemberTask(Consumer &consumer) } } +void PortsOrch::doTransceiverInfoTableTask(Consumer &consumer) +{ + /* + the idea is to listen to transceiver info table, and also maintain an internal list of plugged modules. 
+ + */ + SWSS_LOG_ENTER(); + + string table_name = consumer.getTableName(); + + auto it = consumer.m_toSync.begin(); + while(it != consumer.m_toSync.end()) + { + auto t = it->second; + string alias = kfvKey(t); + string op = kfvOp(t); + + if (op == SET_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - SET command for port %s", alias.c_str()); + + if (m_pluggedModulesPort.find(alias) == m_pluggedModulesPort.end()) + { + m_pluggedModulesPort[alias] = m_portList[alias]; + + SWSS_LOG_DEBUG("Setting host_tx_signal allow for port %s", alias.c_str()); + setSaiHostTxSignal(m_pluggedModulesPort[alias], true); + } + } + else if (op == DEL_COMMAND) + { + SWSS_LOG_DEBUG("TRANSCEIVER_INFO table has changed - DEL command for port %s", alias.c_str()); + + Port p; + if (m_pluggedModulesPort.find(alias) != m_pluggedModulesPort.end()) + { + p = m_pluggedModulesPort[alias]; + m_pluggedModulesPort.erase(alias); + SWSS_LOG_DEBUG("Setting host_tx_signal NOT allow for port %s", alias.c_str()); + setSaiHostTxSignal(p, false); + } + } + + it = consumer.m_toSync.erase(it); + } +} + +bool PortsOrch::setSaiHostTxSignal(const Port &port, bool enable) +{ + sai_attribute_t attr; + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = enable; + sai_status_t status = sai_port_api->set_port_attribute(port.m_port_id, &attr); + + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Could not setSAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE to port 0x%" PRIx64, port.m_port_id); + return false; + } + + return true; +} + void PortsOrch::doLagTask(Consumer &consumer) { SWSS_LOG_ENTER(); @@ -4761,10 +5022,18 @@ void PortsOrch::doTask(Consumer &consumer) string table_name = consumer.getTableName(); - if (table_name == APP_PORT_TABLE_NAME) + if (table_name == STATE_TRANSCEIVER_INFO_TABLE_NAME) + { + doTransceiverInfoTableTask(consumer); + } + else if (table_name == APP_PORT_TABLE_NAME) { doPortTask(consumer); } + else if (table_name == APP_SEND_TO_INGRESS_PORT_TABLE_NAME) + { + 
doSendToIngressPortTask(consumer); + } else { /* Wait for all ports to be initialized */ @@ -5047,6 +5316,7 @@ bool PortsOrch::initializePort(Port &port) port.m_oper_status = SAI_PORT_OPER_STATUS_DOWN; } + /* initialize port admin status */ if (!getPortAdminStatus(port.m_port_id, port.m_admin_state_up)) { @@ -5067,6 +5337,22 @@ bool PortsOrch::initializePort(Port &port) SWSS_LOG_ERROR("Failed to get initial port mtu %d", port.m_mtu); } + /* initialize port host_tx_ready value (only for supporting systems) */ + if (m_cmisModuleAsicSyncSupported) + { + bool hostTxReadyVal; + if (!getPortHostTxReady(port, hostTxReadyVal)) + { + SWSS_LOG_ERROR("Failed to get host_tx_ready value from SAI to Port %" PRIx64 , port.m_port_id); + } + /* set value to state DB */ + + string hostTxReadyStr = hostTxReadyVal ? "true" : "false"; + + SWSS_LOG_DEBUG("Received host_tx_ready current status: port_id: 0x%" PRIx64 " status: %s", port.m_port_id, hostTxReadyStr.c_str()); + m_portStateTable.hset(port.m_alias, "host_tx_ready", hostTxReadyStr); + } + /* * always initialize Port SAI_HOSTIF_ATTR_OPER_STATUS based on oper_status value in appDB. 
*/ @@ -5138,6 +5424,64 @@ bool PortsOrch::addHostIntfs(Port &port, string alias, sai_object_id_t &host_int return true; } +ReturnCode PortsOrch::addSendToIngressHostIf(const std::string &send_to_ingress_name) +{ + SWSS_LOG_ENTER(); + + // For SendToIngress port, add the host interface and bind to the CPU port + vector ingress_attribs; + sai_attribute_t attr; + + attr.id = SAI_HOSTIF_ATTR_TYPE; + attr.value.s32 = SAI_HOSTIF_TYPE_NETDEV; + ingress_attribs.push_back(attr); + + attr.id = SAI_HOSTIF_ATTR_NAME; + auto size = sizeof(attr.value.chardata); + strncpy(attr.value.chardata, send_to_ingress_name.c_str(), + size - 1); + attr.value.chardata[size - 1] = '\0'; + ingress_attribs.push_back(attr); + + // If this isn't passed as true, the false setting makes + // the device unready for later attempts to set UP/RUNNING + attr.id = SAI_HOSTIF_ATTR_OPER_STATUS; + attr.value.booldata = true; + ingress_attribs.push_back(attr); + + // Get CPU port object id to signal send to ingress + attr.id = SAI_HOSTIF_ATTR_OBJ_ID; + attr.value.oid = m_cpuPort.m_port_id; + ingress_attribs.push_back(attr); + + LOG_AND_RETURN_IF_ERROR(sai_hostif_api->create_hostif(&m_cpuPort.m_hif_id, + gSwitchId, + (uint32_t)ingress_attribs.size(), + ingress_attribs.data())); + + return ReturnCode(); +} + +ReturnCode PortsOrch::removeSendToIngressHostIf() +{ + SWSS_LOG_ENTER(); + + if (SAI_NULL_OBJECT_ID == m_cpuPort.m_hif_id) + { + ReturnCode rc = ReturnCode(StatusCode::SWSS_RC_NOT_FOUND) + << "Can't delete invalid SendToIngress hostif with SAI_NULL_OBJECT_ID oid"; + SWSS_LOG_ERROR("%s", rc.message().c_str()); + return rc; + } + + CHECK_ERROR_AND_LOG_AND_RETURN( + sai_hostif_api->remove_hostif(m_cpuPort.m_hif_id), + "Failed to delete SendToIngress hostif:0x" + << std::hex << m_cpuPort.m_hif_id); + + return ReturnCode(); +} + bool PortsOrch::setBridgePortLearningFDB(Port &port, sai_bridge_port_fdb_learning_mode_t mode) { // TODO: how to support 1D bridge? 
@@ -6363,7 +6707,7 @@ void PortsOrch::generateQueueMap(map queuesState { auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); - if (isCreateAllQueues) + if (isCreateAllQueues && maxQueueNumber) { flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); } @@ -6501,7 +6845,7 @@ void PortsOrch::addQueueFlexCounters(map queuesS { auto maxQueueNumber = getNumberOfPortSupportedQueueCounters(it.second.m_alias); FlexCounterQueueStates flexCounterQueueState(maxQueueNumber); - if (isCreateAllQueues) + if (isCreateAllQueues && maxQueueNumber) { flexCounterQueueState.enableQueueCounters(0, maxQueueNumber - 1); } @@ -6578,7 +6922,7 @@ void PortsOrch::addQueueWatermarkFlexCounters(map pgsSta { auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); FlexCounterPgStates flexCounterPgState(maxPgNumber); - if (isCreateAllPgs) + if (isCreateAllPgs && maxPgNumber) { flexCounterPgState.enablePgCounters(0, maxPgNumber - 1); } @@ -6877,7 +7221,7 @@ void PortsOrch::addPriorityGroupFlexCounters(map pg { auto maxPgNumber = getNumberOfPortSupportedPgCounters(it.second.m_alias); FlexCounterPgStates flexCounterPgState(maxPgNumber); - if (isCreateAllPgs) + if (isCreateAllPgs && maxPgNumber) { flexCounterPgState.enablePgCounters(0, maxPgNumber - 1); } @@ -6946,7 +7290,7 @@ void PortsOrch::addPriorityGroupWatermarkFlexCounters(mapget_port_attribute(port.m_port_id, 1, &attr); if (ret != SAI_STATUS_SUCCESS) { - SWSS_LOG_ERROR("Failed to get oper fec for %s", port.m_alias.c_str()); + SWSS_LOG_NOTICE("Failed to get oper fec for %s", port.m_alias.c_str()); return false; } @@ -7769,11 +8125,11 @@ bool PortsOrch::initGearboxPort(Port &port) } attr.value.s32 = sai_fec; attrs.push_back(attr); - + if (fec_override_sup) { attr.id = SAI_PORT_ATTR_AUTO_NEG_FEC_MODE_OVERRIDE; - + attr.value.booldata = m_portHlpr.fecIsOverrideRequired(m_gearboxPortMap[port.m_index].system_fec); 
attrs.push_back(attr); } @@ -7786,6 +8142,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.booldata = m_gearboxPortMap[port.m_index].system_training; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&systemPort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -7887,6 +8250,13 @@ bool PortsOrch::initGearboxPort(Port &port) attr.value.u32 = media_type_map[m_gearboxPortMap[port.m_index].line_adver_media_type]; attrs.push_back(attr); + if (m_cmisModuleAsicSyncSupported) + { + attr.id = SAI_PORT_ATTR_HOST_TX_SIGNAL_ENABLE; + attr.value.booldata = false; + attrs.push_back(attr); + } + status = sai_port_api->create_port(&linePort, phyOid, static_cast(attrs.size()), attrs.data()); if (status != SAI_STATUS_SUCCESS) { @@ -8640,3 +9010,4 @@ void PortsOrch::doTask(swss::SelectableTimer &timer) m_port_state_poller->stop(); } } + diff --git a/orchagent/portsorch.h b/orchagent/portsorch.h index 40f63dad0d3..85355b25c0f 100755 --- a/orchagent/portsorch.h +++ b/orchagent/portsorch.h @@ -16,6 +16,7 @@ #include "flexcounterorch.h" #include "events.h" +#include "port/port_capabilities.h" #include "port/porthlpr.h" #include "port/portschema.h" @@ -239,6 +240,7 @@ class PortsOrch : public Orch, public Subject unique_ptr
m_counterSysPortTable; unique_ptr
m_counterLagTable; unique_ptr
m_portTable; + unique_ptr
m_sendToIngressPortTable; unique_ptr
m_gearboxTable; unique_ptr
m_queueTable; unique_ptr
m_voqTable; @@ -276,6 +278,7 @@ class PortsOrch : public Orch, public Subject std::map m_portSupportedFecModes; bool m_initDone = false; + bool m_isSendToIngressPortConfigured = false; Port m_cpuPort; // TODO: Add Bridge/Vlan class sai_object_id_t m_default1QBridge; @@ -307,6 +310,7 @@ class PortsOrch : public Orch, public Subject map, sai_object_id_t> m_portListLaneMap; map, PortConfig> m_lanesAliasSpeedMap; map m_portList; + map m_pluggedModulesPort; map m_portVlanMember; map> m_port_voq_ids; /* mapping from SAI object ID to Name for faster @@ -323,18 +327,24 @@ class PortsOrch : public Orch, public Subject map m_bridge_port_ref_count; NotificationConsumer* m_portStatusNotificationConsumer; + NotificationConsumer* m_portHostTxReadyNotificationConsumer; + bool fec_override_sup = false; bool oper_fec_sup = false; swss::SelectableTimer *m_port_state_poller = nullptr; + bool m_cmisModuleAsicSyncSupported = false; + void doTask() override; void doTask(Consumer &consumer); void doPortTask(Consumer &consumer); + void doSendToIngressPortTask(Consumer &consumer); void doVlanTask(Consumer &consumer); void doVlanMemberTask(Consumer &consumer); void doLagTask(Consumer &consumer); void doLagMemberTask(Consumer &consumer); + void doTransceiverInfoTableTask(Consumer &consumer); void doTask(NotificationConsumer &consumer); void doTask(swss::SelectableTimer &timer); @@ -377,6 +387,7 @@ class PortsOrch : public Orch, public Subject bool setPortAdminStatus(Port &port, bool up); bool getPortAdminStatus(sai_object_id_t id, bool& up); bool getPortMtu(const Port& port, sai_uint32_t &mtu); + bool getPortHostTxReady(const Port& port, bool &hostTxReadyVal); bool setPortMtu(const Port& port, sai_uint32_t mtu); bool setPortTpid(Port &port, sai_uint16_t tpid); bool setPortPvid (Port &port, sai_uint32_t pvid); @@ -388,6 +399,9 @@ class PortsOrch : public Orch, public Subject bool setBridgePortAdminStatus(sai_object_id_t id, bool up); + bool setSaiHostTxSignal(const Port &port, bool enable); + 
+ void setHostTxReady(sai_object_id_t portId, const std::string &status); // Get supported speeds on system side bool isSpeedSupported(const std::string& alias, sai_object_id_t port_id, sai_uint32_t speed); void getPortSupportedSpeeds(const std::string& alias, sai_object_id_t port_id, PortSupportedSpeeds &supported_speeds); @@ -464,6 +478,9 @@ class PortsOrch : public Orch, public Subject bool getSaiAclBindPointType(Port::Type type, sai_acl_bind_point_type_t &sai_acl_bind_type); + + ReturnCode addSendToIngressHostIf(const std::string &send_to_ingress_name); + ReturnCode removeSendToIngressHostIf(); void initGearbox(); bool initGearboxPort(Port &port); bool getPortOperFec(const Port& port, sai_port_fec_mode_t &fec_mode) const; @@ -502,6 +519,9 @@ class PortsOrch : public Orch, public Subject // Port config aggregator std::unordered_map> m_portConfigMap; + // Port OA capabilities + PortCapabilities m_portCap; + // Port OA helper PortHelper m_portHlpr; }; diff --git a/orchagent/qosorch.cpp b/orchagent/qosorch.cpp index 4d718977145..90fc6fc7669 100644 --- a/orchagent/qosorch.cpp +++ b/orchagent/qosorch.cpp @@ -1783,7 +1783,7 @@ task_process_status QosOrch::handleQueueTable(Consumer& consumer, KeyOpFieldsVal return task_process_status::task_invalid_entry; } - if(tokens[0] == gMyHostName) + if((tokens[0] == gMyHostName) && (tokens[1] == gMyAsicName)) { local_port = true; local_port_name = tokens[2]; diff --git a/orchagent/saihelper.cpp b/orchagent/saihelper.cpp index e50f697a85c..6fcf4c5014a 100644 --- a/orchagent/saihelper.cpp +++ b/orchagent/saihelper.cpp @@ -642,6 +642,20 @@ task_process_status handleSaiSetStatus(sai_api_t api, sai_status_t status, void break; } break; + case SAI_API_BUFFER: + switch (status) + { + case SAI_STATUS_INSUFFICIENT_RESOURCES: + SWSS_LOG_ERROR("Encountered SAI_STATUS_INSUFFICIENT_RESOURCES in set operation, task failed, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + return task_failed; + 
default: + SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", + sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); + handleSaiFailure(true); + break; + } + break; default: SWSS_LOG_ERROR("Encountered failure in set operation, exiting orchagent, SAI API: %s, status: %s", sai_serialize_api(api).c_str(), sai_serialize_status(status).c_str()); diff --git a/orchagent/srv6orch.cpp b/orchagent/srv6orch.cpp index 5a80576e3a2..d1177cddc2b 100644 --- a/orchagent/srv6orch.cpp +++ b/orchagent/srv6orch.cpp @@ -10,6 +10,8 @@ using namespace std; using namespace swss; +#define ADJ_DELIMITER ',' + extern sai_object_id_t gSwitchId; extern sai_object_id_t gVirtualRouterId; extern sai_object_id_t gUnderlayIfId; @@ -422,6 +424,175 @@ bool Srv6Orch::mySidExists(string my_sid_string) return false; } +/* + * Neighbor change notification to be processed for the SRv6 MySID entries + * + * In summary, this function handles both add and delete neighbor notifications + * + * When a neighbor ADD notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries that are waiting for this neighbor to be ready + * - For each SID, we install the SID into the ASIC + * - We remove the SID from the pending MySID entries list + * + * When a neighbor DELETE notification is received, we do the following steps: + * - We walk through the list of pending SRv6 MySID entries installed in the ASIC + * - For each SID, we remove the SID from the ASIC + * - We add the SID to the pending MySID entries list + */ +void Srv6Orch::updateNeighbor(const NeighborUpdate& update) +{ + SWSS_LOG_ENTER(); + + /* Check if the received notification is a neighbor add or a neighbor delete */ + if (update.add) + { + /* + * It's a neighbor add notification, let's walk through the list of SRv6 MySID entries + * that are waiting for that neighbor to be ready, and install them into the ASIC. 
+ */ + + SWSS_LOG_INFO("Neighbor ADD event: %s alias '%s', installing pending SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + auto it = m_pendingSRv6MySIDEntries.find(NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)); + if (it == m_pendingSRv6MySIDEntries.end()) + { + /* No SID is waiting for this neighbor. Nothing to do */ + return; + } + auto &nexthop_key = it->first; + auto &pending_my_sid_entries = it->second; + + for (auto iter = pending_my_sid_entries.begin(); iter != pending_my_sid_entries.end();) + { + string my_sid_string = get<0>(*iter); + const string dt_vrf = get<1>(*iter); + const string adj = get<2>(*iter); + const string end_action = get<3>(*iter); + + SWSS_LOG_INFO("Creating SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(), adj.c_str()); + + if(!createUpdateMysidEntry(my_sid_string, dt_vrf, adj, end_action)) + { + SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", my_sid_string.c_str()); + ++iter; + continue; + } + + SWSS_LOG_INFO("SID %s created successfully", my_sid_string.c_str()); + + iter = pending_my_sid_entries.erase(iter); + } + + if (pending_my_sid_entries.size() == 0) + { + m_pendingSRv6MySIDEntries.erase(nexthop_key); + } + } + else + { + /* + * It's a neighbor delete notification, let's uninstall the SRv6 MySID entries associated with that + * nexthop from the ASIC, and add them to the SRv6 MySID entries pending set. 
+ */ + + SWSS_LOG_INFO("Neighbor DELETE event: %s alias '%s', removing associated SRv6 SIDs", + update.entry.ip_address.to_string().c_str(), update.entry.alias.c_str()); + + for (auto it = srv6_my_sid_table_.begin(); it != srv6_my_sid_table_.end();) + { + /* Skip SIDs that are not associated with a L3 Adjacency */ + if (it->second.endAdjString.empty()) + { + ++it; + continue; + } + + try + { + /* Skip SIDs that are not associated with this neighbor */ + if (IpAddress(it->second.endAdjString) != update.entry.ip_address) + { + ++it; + continue; + } + } + catch (const std::invalid_argument &e) + { + /* SRv6 SID is associated with an invalid L3 Adjacency IP address, skipping */ + ++it; + continue; + } + + /* + * Save SID entry information to temp variables, before removing the SID. + * This information will be consumed used later. + */ + string my_sid_string = it->first; + const string dt_vrf = it->second.endVrfString; + const string adj = it->second.endAdjString; + string end_action; + for (auto iter = end_behavior_map.begin(); iter != end_behavior_map.end(); iter++) + { + if (iter->second == it->second.endBehavior) + { + end_action = iter->first; + break; + } + } + + /* Skip SIDs with unknown SRv6 behavior */ + if (end_action.empty()) + { + ++it; + continue; + } + + SWSS_LOG_INFO("Removing SID %s, action %s, vrf %s, adj %s", my_sid_string.c_str(), dt_vrf.c_str(), adj.c_str(), end_action.c_str()); + + /* Let's delete the SID from the ASIC */ + unordered_map::iterator tmp = it; + ++tmp; + if(!deleteMysidEntry(it->first)) + { + SWSS_LOG_ERROR("Failed to delete my_sid entry for sid %s", it->first.c_str()); + ++it; + continue; + } + it = tmp; + + SWSS_LOG_INFO("SID %s removed successfully", my_sid_string.c_str()); + + /* + * Finally, add the SID to the pending MySID entries set, so that we can re-install it + * when the neighbor comes back + */ + auto pending_mysid_entry = make_tuple(my_sid_string, dt_vrf, adj, end_action); + 
m_pendingSRv6MySIDEntries[NextHopKey(update.entry.ip_address.to_string(), update.entry.alias)].insert(pending_mysid_entry); + } + } +} + +void Srv6Orch::update(SubjectType type, void *cntx) +{ + SWSS_LOG_ENTER(); + + assert(cntx); + + switch(type) { + case SUBJECT_TYPE_NEIGH_CHANGE: + { + NeighborUpdate *update = static_cast(cntx); + updateNeighbor(*update); + break; + } + default: + // Received update in which we are not interested + // Ignore it + return; + } +} + bool Srv6Orch::sidEntryEndpointBehavior(string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor) { @@ -452,7 +623,23 @@ bool Srv6Orch::mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_b return false; } -bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string end_action) +bool Srv6Orch::mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior) +{ + if (end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6 || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_ENCAPS_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_B6_INSERT_RED || + end_behavior == SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA) + { + return true; + } + return false; +} + +bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, const string adj, const string end_action) { SWSS_LOG_ENTER(); vector attributes; @@ -490,9 +677,9 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, my_sid_entry = srv6_my_sid_table_[key_string].entry; } - SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node %d, func %d, arg %d dt_vrf %s", + SWSS_LOG_INFO("MySid: sid %s, action %s, vrf %s, block %d, node 
%d, func %d, arg %d dt_vrf %s, adj %s", my_sid_string.c_str(), end_action.c_str(), dt_vrf.c_str(),my_sid_entry.locator_block_len, my_sid_entry.locator_node_len, - my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str()); + my_sid_entry.function_len, my_sid_entry.args_len, dt_vrf.c_str(), adj.c_str()); if (sidEntryEndpointBehavior(end_action, end_behavior, end_flavor) != true) { @@ -529,6 +716,47 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, attributes.push_back(vrf_attr); vrf_update = true; } + sai_attribute_t nh_attr; + NextHopKey nexthop; + bool nh_update = false; + if (mySidNextHopRequired(end_behavior)) + { + sai_object_id_t next_hop_id; + + vector adjv = tokenize(adj, ADJ_DELIMITER); + if (adjv.size() > 1) + { + SWSS_LOG_ERROR("Failed to create my_sid entry %s adj %s: ECMP adjacency not yet supported", key_string.c_str(), adj.c_str()); + return false; + } + + nexthop = NextHopKey(adj); + SWSS_LOG_INFO("Adjacency %s", adj.c_str()); + if (m_neighOrch->hasNextHop(nexthop)) + { + SWSS_LOG_INFO("Nexthop for adjacency %s exists in DB", adj.c_str()); + next_hop_id = m_neighOrch->getNextHopId(nexthop); + if(next_hop_id == SAI_NULL_OBJECT_ID) + { + SWSS_LOG_INFO("Failed to get nexthop for adjacency %s", adj.c_str()); + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + } + else + { + SWSS_LOG_INFO("Nexthop for adjacency %s doesn't exist in DB yet", adj.c_str()); + auto pending_mysid_entry = make_tuple(key_string, dt_vrf, adj, end_action); + m_pendingSRv6MySIDEntries[nexthop].insert(pending_mysid_entry); + return false; + } + nh_attr.id = SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID; + nh_attr.value.oid = next_hop_id; + attributes.push_back(nh_attr); + nh_update = true; + } attr.id = SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR; attr.value.s32 = 
end_behavior; attributes.push_back(attr); @@ -559,6 +787,15 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, return false; } } + if (nh_update) + { + status = sai_srv6_api->set_my_sid_entry_attribute(&my_sid_entry, &nh_attr); + if(status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR("Failed to update nexthop to my_sid_entry %s, rv %d", key_string.c_str(), status); + return false; + } + } } SWSS_LOG_INFO("Store keystring %s in cache", key_string.c_str()); if(vrf_update) @@ -566,6 +803,15 @@ bool Srv6Orch::createUpdateMysidEntry(string my_sid_string, const string dt_vrf, m_vrfOrch->increaseVrfRefCount(dt_vrf); srv6_my_sid_table_[key_string].endVrfString = dt_vrf; } + if(nh_update) + { + m_neighOrch->increaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Increasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + + srv6_my_sid_table_[key_string].endAdjString = adj; + } srv6_my_sid_table_[key_string].endBehavior = end_behavior; srv6_my_sid_table_[key_string].entry = my_sid_entry; @@ -596,6 +842,15 @@ bool Srv6Orch::deleteMysidEntry(const string my_sid_string) { m_vrfOrch->decreaseVrfRefCount(srv6_my_sid_table_[my_sid_string].endVrfString); } + /* Decrease NextHop refcount */ + if (mySidNextHopRequired(srv6_my_sid_table_[my_sid_string].endBehavior)) + { + NextHopKey nexthop = NextHopKey(srv6_my_sid_table_[my_sid_string].endAdjString); + m_neighOrch->decreaseNextHopRefCount(nexthop, 1); + + SWSS_LOG_INFO("Decreasing refcount to %d for Nexthop %s", + m_neighOrch->getNextHopRefCount(nexthop), nexthop.to_string(false,true).c_str()); + } srv6_my_sid_table_.erase(my_sid_string); return true; } @@ -604,7 +859,7 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) { SWSS_LOG_ENTER(); string op = kfvOp(tuple); - string end_action, dt_vrf; + string end_action, dt_vrf, adj; /* Key for mySid : block_len:node_len:function_len:args_len:sid-ip */ string keyString 
= kfvKey(tuple); @@ -619,10 +874,14 @@ void Srv6Orch::doTaskMySidTable(const KeyOpFieldsValuesTuple & tuple) { dt_vrf = fvValue(i); } + if(fvField(i) == "adj") + { + adj = fvValue(i); + } } if (op == SET_COMMAND) { - if(!createUpdateMysidEntry(keyString, dt_vrf, end_action)) + if(!createUpdateMysidEntry(keyString, dt_vrf, adj, end_action)) { SWSS_LOG_ERROR("Failed to create/update my_sid entry for sid %s", keyString.c_str()); return; diff --git a/orchagent/srv6orch.h b/orchagent/srv6orch.h index e24f5e00f5d..a3e39b56326 100644 --- a/orchagent/srv6orch.h +++ b/orchagent/srv6orch.h @@ -43,6 +43,7 @@ struct MySidEntry sai_my_sid_entry_t entry; sai_my_sid_entry_endpoint_behavior_t endBehavior; string endVrfString; // Used for END.T, END.DT4, END.DT6 and END.DT46, + string endAdjString; // Used for END.X, END.DX4, END.DX6 }; typedef unordered_map SidTable; @@ -52,7 +53,7 @@ typedef unordered_map Srv6MySidTable; #define SID_LIST_DELIMITER ',' #define MY_SID_KEY_DELIMITER ':' -class Srv6Orch : public Orch +class Srv6Orch : public Orch, public Observer { public: Srv6Orch(DBConnector *applDb, vector &tableNames, SwitchOrch *switchOrch, VRFOrch *vrfOrch, NeighOrch *neighOrch): @@ -63,10 +64,11 @@ class Srv6Orch : public Orch m_sidTable(applDb, APP_SRV6_SID_LIST_TABLE_NAME), m_mysidTable(applDb, APP_SRV6_MY_SID_TABLE_NAME) { + m_neighOrch->attach(this); } ~Srv6Orch() { - + m_neighOrch->detach(this); } bool srv6Nexthops(const NextHopGroupKey &nextHops, sai_object_id_t &next_hop_id); bool removeSrv6Nexthops(const NextHopGroupKey &nhg); @@ -81,15 +83,17 @@ class Srv6Orch : public Orch bool createSrv6Tunnel(const string srv6_source); bool createSrv6Nexthop(const NextHopKey &nh); bool srv6NexthopExists(const NextHopKey &nh); - bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string end_action); + bool createUpdateMysidEntry(string my_sid_string, const string vrf, const string adj, const string end_action); bool deleteMysidEntry(const string my_sid_string); 
bool sidEntryEndpointBehavior(const string action, sai_my_sid_entry_endpoint_behavior_t &end_behavior, sai_my_sid_entry_endpoint_behavior_flavor_t &end_flavor); bool mySidExists(const string mysid_string); bool mySidVrfRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); + bool mySidNextHopRequired(const sai_my_sid_entry_endpoint_behavior_t end_behavior); void srv6TunnelUpdateNexthops(const string srv6_source, const NextHopKey nhkey, bool insert); size_t srv6TunnelNexthopSize(const string srv6_source); + void updateNeighbor(const NeighborUpdate& update); ProducerStateTable m_sidTable; ProducerStateTable m_mysidTable; @@ -100,6 +104,15 @@ class Srv6Orch : public Orch VRFOrch *m_vrfOrch; SwitchOrch *m_switchOrch; NeighOrch *m_neighOrch; + + /* + * Map to store the SRv6 MySID entries not yet configured in ASIC because associated to a non-ready nexthop + * + * Key: nexthop + * Value: a set of SID entries that are waiting for the nexthop to be ready + * each SID entry is encoded as a tuple + */ + map>> m_pendingSRv6MySIDEntries; }; #endif // SWSS_SRV6ORCH_H diff --git a/orchagent/switch/switch_capabilities.cpp b/orchagent/switch/switch_capabilities.cpp index d826a9e49f2..d1f191bf391 100644 --- a/orchagent/switch/switch_capabilities.cpp +++ b/orchagent/switch/switch_capabilities.cpp @@ -1,9 +1,10 @@ // includes ----------------------------------------------------------------------------------------------------------- extern "C" { +#include #include #include -#include +#include #include } @@ -11,6 +12,7 @@ extern "C" { #include #include #include +#include #include #include @@ -26,8 +28,14 @@ using namespace swss; // defines ------------------------------------------------------------------------------------------------------------ #define SWITCH_CAPABILITY_HASH_NATIVE_HASH_FIELD_LIST_FIELD "HASH|NATIVE_HASH_FIELD_LIST" -#define SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD "ECMP_HASH_CAPABLE" -#define SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD "LAG_HASH_CAPABLE" + 
+#define SWITCH_CAPABILITY_ECMP_HASH_CAPABLE_FIELD "ECMP_HASH_CAPABLE" +#define SWITCH_CAPABILITY_LAG_HASH_CAPABLE_FIELD "LAG_HASH_CAPABLE" + +#define SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_FIELD "ECMP_HASH_ALGORITHM" +#define SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_CAPABLE_FIELD "ECMP_HASH_ALGORITHM_CAPABLE" +#define SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_FIELD "LAG_HASH_ALGORITHM" +#define SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_CAPABLE_FIELD "LAG_HASH_ALGORITHM_CAPABLE" #define SWITCH_CAPABILITY_KEY "switch" @@ -58,20 +66,45 @@ static const std::unordered_map swHashHash { SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT, SWITCH_HASH_FIELD_INNER_L4_SRC_PORT } }; +static const std::unordered_map swHashAlgorithmMap = +{ + { SAI_HASH_ALGORITHM_CRC, SWITCH_HASH_ALGORITHM_CRC }, + { SAI_HASH_ALGORITHM_XOR, SWITCH_HASH_ALGORITHM_XOR }, + { SAI_HASH_ALGORITHM_RANDOM, SWITCH_HASH_ALGORITHM_RANDOM }, + { SAI_HASH_ALGORITHM_CRC_32LO, SWITCH_HASH_ALGORITHM_CRC_32LO }, + { SAI_HASH_ALGORITHM_CRC_32HI, SWITCH_HASH_ALGORITHM_CRC_32HI }, + { SAI_HASH_ALGORITHM_CRC_CCITT, SWITCH_HASH_ALGORITHM_CRC_CCITT }, + { SAI_HASH_ALGORITHM_CRC_XOR, SWITCH_HASH_ALGORITHM_CRC_XOR } +}; + // variables ---------------------------------------------------------------------------------------------------------- extern sai_object_id_t gSwitchId; // functions ---------------------------------------------------------------------------------------------------------- -static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) noexcept +static std::string toStr(sai_object_type_t objType, sai_attr_id_t attrId) { const auto *meta = sai_metadata_get_attr_metadata(objType, attrId); return meta != nullptr ? meta->attridname : "UNKNOWN"; } -static std::string toStr(const std::set &value) noexcept +static std::string toStr(sai_native_hash_field_t value) +{ + const auto *name = sai_metadata_get_native_hash_field_name(value); + + return name != nullptr ? 
name : "UNKNOWN"; +} + +static std::string toStr(sai_hash_algorithm_t value) +{ + const auto *name = sai_metadata_get_hash_algorithm_name(value); + + return name != nullptr ? name : "UNKNOWN"; +} + +static std::string toStr(const std::set &value) { std::vector strList; @@ -87,11 +120,33 @@ static std::string toStr(const std::set &value) noexcep return join(",", strList.cbegin(), strList.cend()); } -static std::string toStr(bool value) noexcept +static std::string toStr(const std::set &value) +{ + std::vector strList; + + for (const auto &cit1 : value) + { + const auto &cit2 = swHashAlgorithmMap.find(cit1); + if (cit2 != swHashAlgorithmMap.cend()) + { + strList.push_back(cit2->second); + } + } + + return join(",", strList.cbegin(), strList.cend()); +} + +static std::string toStr(bool value) { return value ? "true" : "false"; } +template +static void insertBack(T1 &out, const T2 &in) +{ + out.insert(out.end(), in.cbegin(), in.cend()); +} + // Switch capabilities ------------------------------------------------------------------------------------------------ DBConnector SwitchCapabilities::stateDb(SWITCH_STATE_DB_NAME, SWITCH_STATE_DB_TIMEOUT); @@ -122,8 +177,20 @@ bool SwitchCapabilities::isSwitchLagHashSupported() const return nativeHashFieldList.isAttrSupported && lagHash.isAttrSupported; } +bool SwitchCapabilities::isSwitchEcmpHashAlgorithmSupported() const +{ + return switchCapabilities.ecmpHashAlgorithm.isAttrSupported; +} + +bool SwitchCapabilities::isSwitchLagHashAlgorithmSupported() const +{ + return switchCapabilities.lagHashAlgorithm.isAttrSupported; +} + bool SwitchCapabilities::validateSwitchHashFieldCap(const std::set &hfSet) const { + SWSS_LOG_ENTER(); + if (!hashCapabilities.nativeHashFieldList.isEnumSupported) { return true; @@ -139,7 +206,7 @@ bool SwitchCapabilities::validateSwitchHashFieldCap(const std::set +bool SwitchCapabilities::validateSwitchHashAlgorithmCap(const T &obj, sai_hash_algorithm_t haValue) const +{ + SWSS_LOG_ENTER(); + + if 
(!obj.isEnumSupported) + { + return true; + } + + if (obj.haSet.empty()) + { + SWSS_LOG_ERROR("Failed to validate hash algorithm: no hash algorithm capabilities"); + return false; + } + + if (obj.haSet.count(haValue) == 0) + { + SWSS_LOG_ERROR("Failed to validate hash algorithm: value(%s) is not supported", toStr(haValue).c_str()); + return false; + } + + return true; +} + FieldValueTuple SwitchCapabilities::makeHashFieldCapDbEntry() const { const auto &nativeHashFieldList = hashCapabilities.nativeHashFieldList; @@ -173,6 +275,42 @@ FieldValueTuple SwitchCapabilities::makeLagHashCapDbEntry() const return FieldValueTuple(field, value); } +std::vector SwitchCapabilities::makeEcmpHashAlgorithmCapDbEntry() const +{ + const auto &ecmpHashAlgorithm = switchCapabilities.ecmpHashAlgorithm; + + std::vector fvList; + + fvList.emplace_back( + SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_FIELD, + ecmpHashAlgorithm.isEnumSupported ? toStr(ecmpHashAlgorithm.haSet) : "N/A" + ); + fvList.emplace_back( + SWITCH_CAPABILITY_ECMP_HASH_ALGORITHM_CAPABLE_FIELD, + toStr(isSwitchEcmpHashAlgorithmSupported()) + ); + + return fvList; +} + +std::vector SwitchCapabilities::makeLagHashAlgorithmCapDbEntry() const +{ + const auto &lagHashAlgorithm = switchCapabilities.lagHashAlgorithm; + + std::vector fvList; + + fvList.emplace_back( + SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_FIELD, + lagHashAlgorithm.isEnumSupported ? 
toStr(lagHashAlgorithm.haSet) : "N/A" + ); + fvList.emplace_back( + SWITCH_CAPABILITY_LAG_HASH_ALGORITHM_CAPABLE_FIELD, + toStr(isSwitchLagHashAlgorithmSupported()) + ); + + return fvList; +} + sai_status_t SwitchCapabilities::queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const { sai_s32_list_t enumList = { .count = 0, .list = nullptr }; @@ -256,7 +394,7 @@ void SwitchCapabilities::queryHashCapabilities() queryHashNativeHashFieldListAttrCapabilities(); } -void SwitchCapabilities::querySwitchEcmpHashCapabilities() +void SwitchCapabilities::querySwitchEcmpHashAttrCapabilities() { SWSS_LOG_ENTER(); @@ -286,7 +424,7 @@ void SwitchCapabilities::querySwitchEcmpHashCapabilities() switchCapabilities.ecmpHash.isAttrSupported = true; } -void SwitchCapabilities::querySwitchLagHashCapabilities() +void SwitchCapabilities::querySwitchLagHashAttrCapabilities() { SWSS_LOG_ENTER(); @@ -316,39 +454,160 @@ void SwitchCapabilities::querySwitchLagHashCapabilities() switchCapabilities.lagHash.isAttrSupported = true; } +void SwitchCapabilities::querySwitchEcmpHashAlgorithmEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector haList; + auto status = queryEnumCapabilitiesSai( + haList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + auto &haSet = switchCapabilities.ecmpHashAlgorithm.haSet; + std::transform( + haList.cbegin(), haList.cend(), std::inserter(haSet, haSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + switchCapabilities.ecmpHashAlgorithm.isEnumSupported = true; +} + +void SwitchCapabilities::querySwitchEcmpHashAlgorithmAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, 
SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + switchCapabilities.ecmpHashAlgorithm.isAttrSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashAlgorithmEnumCapabilities() +{ + SWSS_LOG_ENTER(); + + std::vector haList; + auto status = queryEnumCapabilitiesSai( + haList, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) enum value capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + auto &haSet = switchCapabilities.lagHashAlgorithm.haSet; + std::transform( + haList.cbegin(), haList.cend(), std::inserter(haSet, haSet.begin()), + [](sai_int32_t value) { return static_cast(value); } + ); + + switchCapabilities.lagHashAlgorithm.isEnumSupported = true; +} + +void SwitchCapabilities::querySwitchLagHashAlgorithmAttrCapabilities() +{ + SWSS_LOG_ENTER(); + + sai_attr_capability_t attrCap; + + auto status = queryAttrCapabilitiesSai( + attrCap, SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM + ); + if (status != SAI_STATUS_SUCCESS) + { + SWSS_LOG_ERROR( + "Failed to get attribute(%s) capabilities", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + if (!attrCap.set_implemented) + { + SWSS_LOG_WARN( + "Attribute(%s) SET is not implemented in SAI", + toStr(SAI_OBJECT_TYPE_SWITCH, SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM).c_str() + ); + return; + } + + 
switchCapabilities.lagHashAlgorithm.isAttrSupported = true; +} + void SwitchCapabilities::querySwitchCapabilities() { - querySwitchEcmpHashCapabilities(); - querySwitchLagHashCapabilities(); + querySwitchEcmpHashAttrCapabilities(); + querySwitchLagHashAttrCapabilities(); + + querySwitchEcmpHashAlgorithmEnumCapabilities(); + querySwitchEcmpHashAlgorithmAttrCapabilities(); + querySwitchLagHashAlgorithmEnumCapabilities(); + querySwitchLagHashAlgorithmAttrCapabilities(); } void SwitchCapabilities::writeHashCapabilitiesToDb() { SWSS_LOG_ENTER(); - auto key = SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY); - std::vector fvList = { makeHashFieldCapDbEntry() }; SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); - SWSS_LOG_NOTICE("Wrote hash enum capabilities to State DB: %s key", key.c_str()); + SWSS_LOG_NOTICE( + "Wrote hash capabilities to State DB: %s key", + SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY).c_str() + ); } void SwitchCapabilities::writeSwitchCapabilitiesToDb() { SWSS_LOG_ENTER(); - auto key = SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY); - std::vector fvList = { makeEcmpHashCapDbEntry(), makeLagHashCapDbEntry() }; + insertBack(fvList, makeEcmpHashAlgorithmCapDbEntry()); + insertBack(fvList, makeLagHashAlgorithmCapDbEntry()); SwitchCapabilities::capTable.set(SWITCH_CAPABILITY_KEY, fvList); - SWSS_LOG_NOTICE("Wrote switch hash capabilities to State DB: %s key", key.c_str()); + SWSS_LOG_NOTICE( + "Wrote switch capabilities to State DB: %s key", + SwitchCapabilities::capTable.getKeyName(SWITCH_CAPABILITY_KEY).c_str() + ); } diff --git a/orchagent/switch/switch_capabilities.h b/orchagent/switch/switch_capabilities.h index 47dcb0c7ecb..9bff1df4979 100644 --- a/orchagent/switch/switch_capabilities.h +++ b/orchagent/switch/switch_capabilities.h @@ -1,9 +1,10 @@ #pragma once extern "C" { -#include #include +#include #include +#include } #include @@ -21,21 +22,39 @@ class SwitchCapabilities final bool 
isSwitchEcmpHashSupported() const; bool isSwitchLagHashSupported() const; + bool isSwitchEcmpHashAlgorithmSupported() const; + bool isSwitchLagHashAlgorithmSupported() const; + bool validateSwitchHashFieldCap(const std::set &hfSet) const; + bool validateSwitchEcmpHashAlgorithmCap(sai_hash_algorithm_t haValue) const; + bool validateSwitchLagHashAlgorithmCap(sai_hash_algorithm_t haValue) const; + private: + template + bool validateSwitchHashAlgorithmCap(const T &obj, sai_hash_algorithm_t haValue) const; + swss::FieldValueTuple makeHashFieldCapDbEntry() const; + swss::FieldValueTuple makeEcmpHashCapDbEntry() const; swss::FieldValueTuple makeLagHashCapDbEntry() const; + std::vector makeEcmpHashAlgorithmCapDbEntry() const; + std::vector makeLagHashAlgorithmCapDbEntry() const; + sai_status_t queryEnumCapabilitiesSai(std::vector &capList, sai_object_type_t objType, sai_attr_id_t attrId) const; sai_status_t queryAttrCapabilitiesSai(sai_attr_capability_t &attrCap, sai_object_type_t objType, sai_attr_id_t attrId) const; void queryHashNativeHashFieldListEnumCapabilities(); void queryHashNativeHashFieldListAttrCapabilities(); - void querySwitchEcmpHashCapabilities(); - void querySwitchLagHashCapabilities(); + void querySwitchEcmpHashAttrCapabilities(); + void querySwitchLagHashAttrCapabilities(); + + void querySwitchEcmpHashAlgorithmEnumCapabilities(); + void querySwitchEcmpHashAlgorithmAttrCapabilities(); + void querySwitchLagHashAlgorithmEnumCapabilities(); + void querySwitchLagHashAlgorithmAttrCapabilities(); void queryHashCapabilities(); void querySwitchCapabilities(); @@ -61,6 +80,18 @@ class SwitchCapabilities final struct { bool isAttrSupported = false; } lagHash; + + struct { + std::set haSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } ecmpHashAlgorithm; + + struct { + std::set haSet; + bool isEnumSupported = false; + bool isAttrSupported = false; + } lagHashAlgorithm; } switchCapabilities; static swss::DBConnector stateDb; diff --git 
a/orchagent/switch/switch_container.h b/orchagent/switch/switch_container.h index c56ae166f0b..a51b5514279 100644 --- a/orchagent/switch/switch_container.h +++ b/orchagent/switch/switch_container.h @@ -1,6 +1,7 @@ #pragma once extern "C" { +#include #include } @@ -24,5 +25,15 @@ class SwitchHash final bool is_set = false; } lag_hash; + struct { + sai_hash_algorithm_t value; + bool is_set = false; + } ecmp_hash_algorithm; + + struct { + sai_hash_algorithm_t value; + bool is_set = false; + } lag_hash_algorithm; + std::unordered_map fieldValueMap; }; diff --git a/orchagent/switch/switch_helper.cpp b/orchagent/switch/switch_helper.cpp index d91f382b25a..23a7c3fd5a6 100644 --- a/orchagent/switch/switch_helper.cpp +++ b/orchagent/switch/switch_helper.cpp @@ -1,5 +1,10 @@ // includes ----------------------------------------------------------------------------------------------------------- +extern "C" { +#include +#include +} + #include #include #include @@ -36,6 +41,17 @@ static const std::unordered_map swHashHash { SWITCH_HASH_FIELD_INNER_L4_SRC_PORT, SAI_NATIVE_HASH_FIELD_INNER_L4_SRC_PORT } }; +static const std::unordered_map swHashAlgorithmMap = +{ + { SWITCH_HASH_ALGORITHM_CRC, SAI_HASH_ALGORITHM_CRC }, + { SWITCH_HASH_ALGORITHM_XOR, SAI_HASH_ALGORITHM_XOR }, + { SWITCH_HASH_ALGORITHM_RANDOM, SAI_HASH_ALGORITHM_RANDOM }, + { SWITCH_HASH_ALGORITHM_CRC_32LO, SAI_HASH_ALGORITHM_CRC_32LO }, + { SWITCH_HASH_ALGORITHM_CRC_32HI, SAI_HASH_ALGORITHM_CRC_32HI }, + { SWITCH_HASH_ALGORITHM_CRC_CCITT, SAI_HASH_ALGORITHM_CRC_CCITT }, + { SWITCH_HASH_ALGORITHM_CRC_XOR, SAI_HASH_ALGORITHM_CRC_XOR } +}; + // switch helper ------------------------------------------------------------------------------------------------------ const SwitchHash& SwitchHelper::getSwHash() const @@ -86,6 +102,30 @@ bool SwitchHelper::parseSwHashFieldList(T &obj, const std::string &field, const return true; } +template +bool SwitchHelper::parseSwHashAlgorithm(T &obj, const std::string &field, const 
std::string &value) const +{ + SWSS_LOG_ENTER(); + + if (value.empty()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): empty value is prohibited", field.c_str()); + return false; + } + + const auto &cit = swHashAlgorithmMap.find(value); + if (cit == swHashAlgorithmMap.cend()) + { + SWSS_LOG_ERROR("Failed to parse field(%s): invalid value(%s)", field.c_str(), value.c_str()); + return false; + } + + obj.value = cit->second; + obj.is_set = true; + + return true; +} + bool SwitchHelper::parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const { return parseSwHashFieldList(hash.ecmp_hash, field, value); @@ -96,6 +136,16 @@ bool SwitchHelper::parseSwHashLagHash(SwitchHash &hash, const std::string &field return parseSwHashFieldList(hash.lag_hash, field, value); } +bool SwitchHelper::parseSwHashEcmpHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashAlgorithm(hash.ecmp_hash_algorithm, field, value); +} + +bool SwitchHelper::parseSwHashLagHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const +{ + return parseSwHashAlgorithm(hash.lag_hash_algorithm, field, value); +} + bool SwitchHelper::parseSwHash(SwitchHash &hash) const { SWSS_LOG_ENTER(); @@ -119,6 +169,20 @@ bool SwitchHelper::parseSwHash(SwitchHash &hash) const return false; } } + else if (field == SWITCH_HASH_ECMP_HASH_ALGORITHM) + { + if (!parseSwHashEcmpHashAlgorithm(hash, field, value)) + { + return false; + } + } + else if (field == SWITCH_HASH_LAG_HASH_ALGORITHM) + { + if (!parseSwHashLagHashAlgorithm(hash, field, value)) + { + return false; + } + } else { SWSS_LOG_WARN("Unknown field(%s): skipping ...", field.c_str()); @@ -132,7 +196,10 @@ bool SwitchHelper::validateSwHash(SwitchHash &hash) const { SWSS_LOG_ENTER(); - if (!hash.ecmp_hash.is_set && !hash.lag_hash.is_set) + auto cond = hash.ecmp_hash.is_set || hash.lag_hash.is_set; + cond = cond || 
hash.ecmp_hash_algorithm.is_set || hash.lag_hash_algorithm.is_set; + + if (!cond) { SWSS_LOG_ERROR("Validation error: missing valid fields"); return false; diff --git a/orchagent/switch/switch_helper.h b/orchagent/switch/switch_helper.h index 611ce2b6fbd..d7be11981fe 100644 --- a/orchagent/switch/switch_helper.h +++ b/orchagent/switch/switch_helper.h @@ -1,5 +1,7 @@ #pragma once +#include + #include "switch_container.h" class SwitchHelper final @@ -16,9 +18,13 @@ class SwitchHelper final private: template bool parseSwHashFieldList(T &obj, const std::string &field, const std::string &value) const; + template + bool parseSwHashAlgorithm(T &obj, const std::string &field, const std::string &value) const; bool parseSwHashEcmpHash(SwitchHash &hash, const std::string &field, const std::string &value) const; bool parseSwHashLagHash(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashEcmpHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const; + bool parseSwHashLagHashAlgorithm(SwitchHash &hash, const std::string &field, const std::string &value) const; bool validateSwHash(SwitchHash &hash) const; diff --git a/orchagent/switch/switch_schema.h b/orchagent/switch/switch_schema.h index c836eff120c..16a17f179c6 100644 --- a/orchagent/switch/switch_schema.h +++ b/orchagent/switch/switch_schema.h @@ -23,3 +23,14 @@ #define SWITCH_HASH_ECMP_HASH "ecmp_hash" #define SWITCH_HASH_LAG_HASH "lag_hash" + +#define SWITCH_HASH_ALGORITHM_CRC "CRC" +#define SWITCH_HASH_ALGORITHM_XOR "XOR" +#define SWITCH_HASH_ALGORITHM_RANDOM "RANDOM" +#define SWITCH_HASH_ALGORITHM_CRC_32LO "CRC_32LO" +#define SWITCH_HASH_ALGORITHM_CRC_32HI "CRC_32HI" +#define SWITCH_HASH_ALGORITHM_CRC_CCITT "CRC_CCITT" +#define SWITCH_HASH_ALGORITHM_CRC_XOR "CRC_XOR" + +#define SWITCH_HASH_ECMP_HASH_ALGORITHM "ecmp_hash_algorithm" +#define SWITCH_HASH_LAG_HASH_ALGORITHM "lag_hash_algorithm" diff --git a/orchagent/switchorch.cpp 
b/orchagent/switchorch.cpp index 76c17812d49..06dc36e4723 100644 --- a/orchagent/switchorch.cpp +++ b/orchagent/switchorch.cpp @@ -497,6 +497,17 @@ bool SwitchOrch::setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHa return status == SAI_STATUS_SUCCESS; } +bool SwitchOrch::setSwitchHashAlgorithmSai(const SwitchHash &hash, bool isEcmpHash) const +{ + sai_attribute_t attr; + + attr.id = isEcmpHash ? SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM : SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM; + attr.value.s32 = static_cast(isEcmpHash ? hash.ecmp_hash_algorithm.value : hash.lag_hash_algorithm.value); + + auto status = sai_switch_api->set_switch_attribute(gSwitchId, &attr); + return status == SAI_STATUS_SUCCESS; +} + bool SwitchOrch::setSwitchHash(const SwitchHash &hash) { SWSS_LOG_ENTER(); @@ -574,6 +585,76 @@ bool SwitchOrch::setSwitchHash(const SwitchHash &hash) } } + if (hash.ecmp_hash_algorithm.is_set) + { + if (!hObj.ecmp_hash_algorithm.is_set || (hObj.ecmp_hash_algorithm.value != hash.ecmp_hash_algorithm.value)) + { + if (swCap.isSwitchEcmpHashAlgorithmSupported()) + { + if (!swCap.validateSwitchEcmpHashAlgorithmCap(hash.ecmp_hash_algorithm.value)) + { + SWSS_LOG_ERROR("Failed to validate switch ECMP hash algorithm: capability is not supported"); + return false; + } + + if (!setSwitchHashAlgorithmSai(hash, true)) + { + SWSS_LOG_ERROR("Failed to set switch ECMP hash algorithm in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch ECMP hash algorithm configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.ecmp_hash_algorithm.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch ECMP hash algorithm configuration: operation is not supported"); + return false; + } + } + + if (hash.lag_hash_algorithm.is_set) + { + if (!hObj.lag_hash_algorithm.is_set || (hObj.lag_hash_algorithm.value != hash.lag_hash_algorithm.value)) + { + if (swCap.isSwitchLagHashAlgorithmSupported()) + { + if 
(!swCap.validateSwitchLagHashAlgorithmCap(hash.lag_hash_algorithm.value)) + { + SWSS_LOG_ERROR("Failed to validate switch LAG hash algorithm: capability is not supported"); + return false; + } + + if (!setSwitchHashAlgorithmSai(hash, false)) + { + SWSS_LOG_ERROR("Failed to set switch LAG hash algorithm in SAI"); + return false; + } + + cfgUpd = true; + } + else + { + SWSS_LOG_WARN("Switch LAG hash algorithm configuration is not supported: skipping ..."); + } + } + } + else + { + if (hObj.lag_hash_algorithm.is_set) + { + SWSS_LOG_ERROR("Failed to remove switch LAG hash algorithm configuration: operation is not supported"); + return false; + } + } + // Don't update internal cache when config remains unchanged if (!cfgUpd) { diff --git a/orchagent/switchorch.h b/orchagent/switchorch.h index c6ee9997f94..7135bcdc395 100644 --- a/orchagent/switchorch.h +++ b/orchagent/switchorch.h @@ -58,6 +58,7 @@ class SwitchOrch : public Orch // Switch hash bool setSwitchHashFieldListSai(const SwitchHash &hash, bool isEcmpHash) const; + bool setSwitchHashAlgorithmSai(const SwitchHash &hash, bool isEcmpHash) const; bool setSwitchHash(const SwitchHash &hash); bool getSwitchHashOidSai(sai_object_id_t &oid, bool isEcmpHash) const; diff --git a/tests/conftest.py b/tests/conftest.py index a431f46cc4e..9264417214d 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -27,6 +27,7 @@ from dvslib import dvs_mirror from dvslib import dvs_policer from dvslib import dvs_hash +from dvslib import dvs_switch from buffer_model import enable_dynamic_buffer @@ -160,6 +161,8 @@ def _populate_default_asic_db_values(self) -> None: self.default_hash_keys = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_HASH") + self.default_switch_keys = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_SWITCH") + self.default_copp_policers = self.get_keys("ASIC_STATE:SAI_OBJECT_TYPE_POLICER") @@ -1348,6 +1351,7 @@ def get_asic_db(self) -> AsicDbValidator: db.default_acl_tables = self.asicdb.default_acl_tables 
db.default_acl_entries = self.asicdb.default_acl_entries db.default_hash_keys = self.asicdb.default_hash_keys + db.default_switch_keys = self.asicdb.default_switch_keys db.default_copp_policers = self.asicdb.default_copp_policers db.port_name_map = self.asicdb.portnamemap db.default_vlan_id = self.asicdb.default_vlan_id @@ -1937,6 +1941,10 @@ def dvs_hash_manager(request, dvs): request.cls.dvs_hash = dvs_hash.DVSHash(dvs.get_asic_db(), dvs.get_config_db()) +@pytest.fixture(scope="class") +def dvs_switch_manager(request, dvs): + request.cls.dvs_switch = dvs_switch.DVSSwitch(dvs.get_asic_db()) + ##################### DPB fixtures ########################################### def create_dpb_config_file(dvs): cmd = "sonic-cfggen -j /etc/sonic/init_cfg.json -j /tmp/ports.json --print-data > /tmp/dpb_config_db.json" diff --git a/tests/dvslib/dvs_switch.py b/tests/dvslib/dvs_switch.py new file mode 100644 index 00000000000..b57dc7082fa --- /dev/null +++ b/tests/dvslib/dvs_switch.py @@ -0,0 +1,96 @@ +"""Utilities for interacting with SWITCH objects when writing VS tests.""" +from typing import Dict, List + + +class DVSSwitch: + """Manage switch objects on the virtual switch.""" + + ADB_SWITCH = "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH" + + def __init__(self, asic_db): + """Create a new DVS switch manager.""" + self.asic_db = asic_db + + def get_switch_ids( + self, + expected: int = None + ) -> List[str]: + """Get all of the switch ids in ASIC DB. + + Args: + expected: The number of switch ids that are expected to be present in ASIC DB. + + Returns: + The list of switch ids in ASIC DB. 
+ """ + if expected is None: + return self.asic_db.get_keys(self.ADB_SWITCH) + + num_keys = len(self.asic_db.default_switch_keys) + expected + keys = self.asic_db.wait_for_n_keys(self.ADB_SWITCH, num_keys) + + for k in self.asic_db.default_switch_keys: + assert k in keys + + return [k for k in keys if k not in self.asic_db.default_switch_keys] + + def verify_switch_count( + self, + expected: int + ) -> None: + """Verify that there are N switch objects in ASIC DB. + + Args: + expected: The number of switch ids that are expected to be present in ASIC DB. + """ + self.get_switch_ids(expected) + + def verify_switch_generic( + self, + sai_switch_id: str, + sai_qualifiers: Dict[str, str] + ) -> None: + """Verify that switch object has correct ASIC DB representation. + + Args: + sai_switch_id: The specific switch id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. + """ + entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) + + for k, v in entry.items(): + if k == "NULL": + continue + elif k in sai_qualifiers: + if k == "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM": + assert sai_qualifiers[k] == v + elif k == "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM": + assert sai_qualifiers[k] == v + else: + assert False, "Unknown SAI qualifier: key={}, value={}".format(k, v) + + def verify_switch( + self, + sai_switch_id: str, + sai_qualifiers: Dict[str, str], + strict: bool = False + ) -> None: + """Verify that switch object has correct ASIC DB representation. + + Args: + sai_switch_id: The specific switch id to check in ASIC DB. + sai_qualifiers: The expected set of SAI qualifiers to be found in ASIC DB. 
+ strict: Specifies whether verification should be strict + """ + if strict: + self.verify_switch_generic(sai_switch_id, sai_qualifiers) + return + + entry = self.asic_db.wait_for_entry(self.ADB_SWITCH, sai_switch_id) + + attr_dict = { + **entry, + **sai_qualifiers + } + + self.verify_switch_generic(sai_switch_id, attr_dict) diff --git a/tests/gcov_support.sh b/tests/gcov_support.sh index c7ddddb961b..94e2b6aba17 100755 --- a/tests/gcov_support.sh +++ b/tests/gcov_support.sh @@ -125,17 +125,15 @@ lcov_merge_all() fi done < infolist - lcov --extract total.info '*sonic-gcov/*' -o total.info - # Remove unit test files. - lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/orchagent/p4orch/tests/*" - lcov -o total.info -r total.info "*sonic-gcov/common_work/gcov/tests/*" + lcov -o total.info -r total.info "*tests/*" + lcov -o total.info -r total.info "/usr/*" cp $1/lcov_cobertura.py $1/common_work/gcov/ python $1/common_work/gcov/lcov_cobertura.py total.info -o coverage.xml - sed -i "s#common_work/gcov/##" coverage.xml - sed -i "s#common_work.gcov.##" coverage.xml + sed -i "s#../../__w/1/s/##" coverage.xml + sed -i "s#......__w.1.s.##" coverage.xml cd gcov_output/ if [ ! 
-d ${ALLMERGE_DIR} ]; then @@ -385,9 +383,7 @@ main() echo "Usage:" echo " collect collect .gcno files based on module" echo " collect_gcda collect .gcda files" - echo " collect_gcda_files collect .gcda files in a docker" echo " generate generate gcov report in html form (all or submodule_name)" - echo " tar_output tar gcov_output forder" echo " merge_container_info merge homonymic info files from different container" echo " set_environment set environment ready for report generating in containers" esac diff --git a/tests/mock_tests/Makefile.am b/tests/mock_tests/Makefile.am index 2156a5dd1cf..b5da2e2fbbc 100644 --- a/tests/mock_tests/Makefile.am +++ b/tests/mock_tests/Makefile.am @@ -70,6 +70,7 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/cbf/nhgmaporch.cpp \ $(top_srcdir)/orchagent/neighorch.cpp \ $(top_srcdir)/orchagent/intfsorch.cpp \ + $(top_srcdir)/orchagent/port/port_capabilities.cpp \ $(top_srcdir)/orchagent/port/porthlpr.cpp \ $(top_srcdir)/orchagent/portsorch.cpp \ $(top_srcdir)/orchagent/fabricportsorch.cpp \ @@ -119,6 +120,8 @@ tests_SOURCES = aclorch_ut.cpp \ $(top_srcdir)/orchagent/zmqorch.cpp \ $(top_srcdir)/orchagent/dash/dashaclorch.cpp \ $(top_srcdir)/orchagent/dash/dashorch.cpp \ + $(top_srcdir)/orchagent/dash/dashaclgroupmgr.cpp \ + $(top_srcdir)/orchagent/dash/dashtagmgr.cpp \ $(top_srcdir)/orchagent/dash/dashrouteorch.cpp \ $(top_srcdir)/orchagent/dash/dashvnetorch.cpp \ $(top_srcdir)/cfgmgr/buffermgrdyn.cpp \ diff --git a/tests/mock_tests/aclorch_ut.cpp b/tests/mock_tests/aclorch_ut.cpp index b03d2191380..8a05e9188ef 100644 --- a/tests/mock_tests/aclorch_ut.cpp +++ b/tests/mock_tests/aclorch_ut.cpp @@ -459,6 +459,8 @@ namespace aclorch_test gMirrorOrch = nullptr; delete gRouteOrch; gRouteOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; delete gNeighOrch; gNeighOrch = nullptr; delete gFdbOrch; @@ -473,8 +475,6 @@ namespace aclorch_test gPortsOrch = nullptr; delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - 
gSrv6Orch = nullptr; auto status = sai_switch_api->remove_switch(gSwitchId); ASSERT_EQ(status, SAI_STATUS_SUCCESS); diff --git a/tests/mock_tests/flowcounterrouteorch_ut.cpp b/tests/mock_tests/flowcounterrouteorch_ut.cpp index 10a52c6b058..cf80bda5bf6 100644 --- a/tests/mock_tests/flowcounterrouteorch_ut.cpp +++ b/tests/mock_tests/flowcounterrouteorch_ut.cpp @@ -299,6 +299,9 @@ namespace flowcounterrouteorch_test delete gBfdOrch; gBfdOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; + delete gNeighOrch; gNeighOrch = nullptr; @@ -314,9 +317,6 @@ namespace flowcounterrouteorch_test delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; - delete gRouteOrch; gRouteOrch = nullptr; diff --git a/tests/mock_tests/intfsorch_ut.cpp b/tests/mock_tests/intfsorch_ut.cpp index 60041520bd0..ffbf348ed45 100644 --- a/tests/mock_tests/intfsorch_ut.cpp +++ b/tests/mock_tests/intfsorch_ut.cpp @@ -249,6 +249,9 @@ namespace intfsorch_test delete gBfdOrch; gBfdOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; + delete gNeighOrch; gNeighOrch = nullptr; @@ -264,9 +267,6 @@ namespace intfsorch_test delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; - delete gRouteOrch; gRouteOrch = nullptr; diff --git a/tests/mock_tests/portsorch_ut.cpp b/tests/mock_tests/portsorch_ut.cpp index c338b9fbebe..fca4f34bebf 100644 --- a/tests/mock_tests/portsorch_ut.cpp +++ b/tests/mock_tests/portsorch_ut.cpp @@ -314,6 +314,7 @@ namespace portsorch_test vector ports_tables = { { APP_PORT_TABLE_NAME, portsorch_base_pri + 5 }, + { APP_SEND_TO_INGRESS_PORT_TABLE_NAME, portsorch_base_pri + 5 }, { APP_VLAN_TABLE_NAME, portsorch_base_pri + 2 }, { APP_VLAN_MEMBER_TABLE_NAME, portsorch_base_pri }, { APP_LAG_TABLE_NAME, portsorch_base_pri + 4 }, @@ -707,7 +708,13 @@ namespace portsorch_test { "post1", "0x10,0x12,0x11,0x13" }, { "post2", "0x10,0x12,0x11,0x13" }, { "post3", "0x10,0x12,0x11,0x13" }, - { "attn", "0x80,0x82,0x81,0x83" } + { 
"attn", "0x80,0x82,0x81,0x83" }, + { "ob_m2lp", "0x4,0x6,0x5,0x7" }, + { "ob_alev_out", "0xf,0x11,0x10,0x12" }, + { "obplev", "0x69,0x6b,0x6a,0x6c" }, + { "obnlev", "0x5f,0x61,0x60,0x62" }, + { "regn_bfm1p", "0x1e,0x20,0x1f,0x21" }, + { "regn_bfm1n", "0xaa,0xac,0xab,0xad" } } }}; @@ -766,6 +773,30 @@ namespace portsorch_test std::vector attn = { 0x80, 0x82, 0x81, 0x83 }; ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_FIR_ATTN), attn); + // Verify ob_m2lp + std::vector ob_m2lp = { 0x4, 0x6, 0x5, 0x7 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PAM4_RATIO), ob_m2lp); + + // Verify ob_alev_out + std::vector ob_alev_out = { 0xf, 0x11, 0x10, 0x12 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_OUT_COMMON_MODE), ob_alev_out); + + // Verify obplev + std::vector obplev = { 0x69, 0x6b, 0x6a, 0x6c }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_COMMON_MODE), obplev); + + // Verify obnlev + std::vector obnlev = { 0x5f, 0x61, 0x60, 0x62 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_COMMON_MODE), obnlev); + + // Verify regn_bfm1p + std::vector regn_bfm1p = { 0x1e, 0x20, 0x1f, 0x21 }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_PMOS_VLTG_REG), regn_bfm1p); + + // Verify regn_bfm1n + std::vector regn_bfm1n = { 0xaa, 0xac, 0xab, 0xad }; + ASSERT_EQ(p.m_preemphasis.at(SAI_PORT_SERDES_ATTR_TX_NMOS_VLTG_REG), regn_bfm1n); + // Dump pending tasks std::vector taskList; gPortsOrch->dumpPendingTasks(taskList); @@ -1236,6 +1267,7 @@ namespace portsorch_test TEST_F(PortsOrchTest, PortReadinessColdBoot) { Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME); + Table sendToIngressPortTable = Table(m_app_db.get(), APP_SEND_TO_INGRESS_PORT_TABLE_NAME); Table pgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME); Table pgTableCfg = Table(m_config_db.get(), CFG_BUFFER_PG_TABLE_NAME); Table profileTable = Table(m_app_db.get(), APP_BUFFER_PROFILE_TABLE_NAME); @@ -1289,6 +1321,8 @@ namespace portsorch_test // Set 
PortConfigDone portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } }); + // Populate send to ingresss port table + sendToIngressPortTable.set("SEND_TO_INGRESS", {{"NULL", "NULL"}}); // refill consumer gPortsOrch->addExistingData(&portTable); diff --git a/tests/mock_tests/routeorch_ut.cpp b/tests/mock_tests/routeorch_ut.cpp index 091dabed6a3..8073aee92b0 100644 --- a/tests/mock_tests/routeorch_ut.cpp +++ b/tests/mock_tests/routeorch_ut.cpp @@ -329,6 +329,9 @@ namespace routeorch_test delete gIntfsOrch; gIntfsOrch = nullptr; + delete gSrv6Orch; + gSrv6Orch = nullptr; + delete gNeighOrch; gNeighOrch = nullptr; @@ -338,9 +341,6 @@ namespace routeorch_test delete gFgNhgOrch; gFgNhgOrch = nullptr; - delete gSrv6Orch; - gSrv6Orch = nullptr; - delete gRouteOrch; gRouteOrch = nullptr; diff --git a/tests/test_acl.py b/tests/test_acl.py index d0bad2c5099..cf68d1516e9 100644 --- a/tests/test_acl.py +++ b/tests/test_acl.py @@ -314,6 +314,22 @@ def test_AclRuleVlanId(self, dvs_acl, l3_acl_table): dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) dvs_acl.verify_no_acl_rules() + def test_AclRuleIPTypeNonIpv4(self, dvs_acl, l3_acl_table): + config_qualifiers = {"IP_TYPE": "NON_IPv4"} + expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE": dvs_acl.get_simple_qualifier_comparator("SAI_ACL_IP_TYPE_NON_IPV4&mask:0xffffffffffffffff") + } + + dvs_acl.create_acl_rule(L3_TABLE_NAME, L3_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, "Active") + dvs_acl.verify_acl_rule(expected_sai_qualifiers) + + dvs_acl.remove_acl_rule(L3_TABLE_NAME, L3_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3_TABLE_NAME, L3_RULE_NAME, None) + dvs_acl.verify_no_acl_rules() + def test_V6AclTableCreationDeletion(self, dvs_acl): try: dvs_acl.create_acl_table(L3V6_TABLE_NAME, @@ -332,6 +348,22 @@ def test_V6AclTableCreationDeletion(self, 
dvs_acl): dvs_acl.verify_acl_table_status(L3V6_TABLE_NAME, None) dvs_acl.verify_acl_table_count(0) + def test_V6AclRuleIPTypeNonIpv6(self, dvs_acl, l3v6_acl_table): + config_qualifiers = {"IP_TYPE": "NON_IPv6"} + expected_sai_qualifiers = { + "SAI_ACL_ENTRY_ATTR_FIELD_ACL_IP_TYPE": dvs_acl.get_simple_qualifier_comparator("SAI_ACL_IP_TYPE_NON_IPV6&mask:0xffffffffffffffff") + } + + dvs_acl.create_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME, config_qualifiers) + # Verify status is written into STATE_DB + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, "Active") + dvs_acl.verify_acl_rule(expected_sai_qualifiers) + + dvs_acl.remove_acl_rule(L3V6_TABLE_NAME, L3V6_RULE_NAME) + # Verify the STATE_DB entry is removed + dvs_acl.verify_acl_rule_status(L3V6_TABLE_NAME, L3V6_RULE_NAME, None) + dvs_acl.verify_no_acl_rules() + def test_V6AclRuleIPv6Any(self, dvs_acl, l3v6_acl_table): config_qualifiers = {"IP_TYPE": "IPv6ANY"} expected_sai_qualifiers = { diff --git a/tests/test_dash_acl.py b/tests/test_dash_acl.py index a0cc1b2ccfb..446f0ee7c2f 100644 --- a/tests/test_dash_acl.py +++ b/tests/test_dash_acl.py @@ -1,3 +1,5 @@ +from collections import namedtuple + from swsscommon import swsscommon from dvslib.dvs_database import DVSDatabase @@ -8,6 +10,7 @@ from dash_api.acl_rule_pb2 import * from dash_api.acl_in_pb2 import * from dash_api.acl_out_pb2 import * +from dash_api.prefix_tag_pb2 import * from dash_api.types_pb2 import * from typing import Union @@ -23,14 +26,21 @@ ACL_GROUP_1 = "acl_group_1" ACL_GROUP_2 = "acl_group_2" +ACL_GROUP_3 = "acl_group_3" ACL_RULE_1 = "1" ACL_RULE_2 = "2" ACL_RULE_3 = "3" ACL_STAGE_1 = "1" ACL_STAGE_2 = "2" +ACL_STAGE_3 = "3" +TAG_1 = "tag_1" +TAG_2 = "tag_2" +TAG_3 = "tag_3" SAI_NULL_OID = "oid:0x0" +PortRange = namedtuple('PortRange', ['min', 'max']) + def to_string(value): if isinstance(value, bool): return "true" if value else "false" @@ -45,6 +55,23 @@ def get_sai_stage(outbound, v4, stage_num): return 
"SAI_ENI_ATTR_{}_{}_STAGE{}_DASH_ACL_GROUP_ID".format(direction, ip_version, stage_num) +def prefix_list_to_set(prefix_list: str): + count, prefixes = prefix_list.split(":") + + ps = set(prefixes.split(",")) + assert len(ps) == int(count) + + return ps + + +def to_ip_prefix(prefix): + net = ipaddress.IPv4Network(prefix, False) + pfx = IpPrefix() + pfx.ip.ipv4 = socket.htonl(int(net.network_address)) + pfx.mask.ipv4 = socket.htonl(int(net.netmask)) + return pfx + + class ProduceStateTable(object): def __init__(self, database, table_name: str): self.table = swsscommon.ProducerStateTable( @@ -104,6 +131,7 @@ def __getitem__(self, key: str): APPL_DB_TABLE_LIST = [ + swsscommon.APP_DASH_PREFIX_TAG_TABLE_NAME, swsscommon.APP_DASH_ACL_IN_TABLE_NAME, swsscommon.APP_DASH_ACL_OUT_TABLE_NAME, swsscommon.APP_DASH_ACL_GROUP_TABLE_NAME, @@ -127,7 +155,7 @@ def __init__(self, dvs): self.dvs.get_app_db(), table ) table_variable_name = "app_{}".format(table.lower()) - # Based on swsscommon convention for table names, assume + # Based on swsscommon convention for table names, assume # e.g. 
swsscommon.APP_DASH_ENI_TABLE_NAME == "DASH_ENI_TABLE", therefore # the ProducerStateTable object for swsscommon.APP_DASH_ENI_TABLE_NAME # will be accessible as `self.app_dash_eni_table` @@ -153,14 +181,67 @@ def __init__(self, dvs): self.asic_vnet_table ] - def create_acl_rule(self, group_id, rule_id, pb): - self.app_dash_acl_rule_table[str( - group_id) + ":" + str(rule_id)] = {"pb": pb.SerializeToString()} + def create_prefix_tag(self, name, ip_version, prefixes): + pb = PrefixTag() + pb.ip_version = ip_version + for prefix in prefixes: + pb.prefix_list.append(to_ip_prefix(prefix)) + self.app_dash_prefix_tag_table[str(name)] = {"pb": pb.SerializeToString()} + + def remove_prefix_tag(self, tag_id): + del self.app_dash_prefix_tag_table[str(tag_id)] + + def create_acl_rule(self, group_id, rule_id, action, terminating, priority, protocol=None, + src_addr=None, dst_addr=None, + src_tag=None, dst_tag=None, + src_port=None, dst_port=None): + pb = AclRule() + pb.priority = priority + pb.action = action + pb.terminating = terminating + + if protocol: + map(pb.protocol.append, protocol) + + if src_addr: + for addr in src_addr: + pb.src_addr.append(to_ip_prefix(addr)) + + if dst_addr: + for addr in dst_addr: + pb.dst_addr.append(to_ip_prefix(addr)) + + if src_tag: + for tag in src_tag: + pb.src_tag.append(tag.encode()) + + if dst_tag: + for tag in dst_tag: + pb.dst_tag.append(tag.encode()) + + if src_port: + for pr in src_port: + vr = ValueOrRange() + vr.range.min = pr.min + vr.range.max = pr.max + pb.src_port.append(vr) + + if dst_port: + for pr in dst_port: + vr = ValueOrRange() + vr.range.min = pr.min + vr.range.max = pr.max + pb.dst_port.append(vr) + + self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)] = {"pb": pb.SerializeToString()} + def remove_acl_rule(self, group_id, rule_id): del self.app_dash_acl_rule_table[str(group_id) + ":" + str(rule_id)] - def create_acl_group(self, group_id, pb): + def create_acl_group(self, group_id, ip_version): + pb = 
AclGroup() + pb.ip_version = ip_version self.app_dash_acl_group_table[str(group_id)] = {"pb": pb.SerializeToString()} def remove_acl_group(self, group_id): @@ -209,6 +290,7 @@ def unbind_acl_out(self, eni, stage): del self.app_dash_acl_out_table[str(eni) + ":" + str(stage)] + class TestAcl(object): @pytest.fixture def ctx(self, dvs): @@ -252,32 +334,25 @@ def ctx(self, dvs): for table in acl_context.asic_db_tables: table.wait_for_n_keys(num_keys=0) + def bind_acl_group(self, ctx, stage_id, group_id, group_oid): + ctx.bind_acl_in(self.eni_name, stage_id, group_id) + self.verify_group_is_bound_to_eni(ctx, stage_id, group_oid) + + def verify_group_is_bound_to_eni(self, ctx, stage_id, group_oid): + eni_key = ctx.asic_eni_table.get_keys()[0] + sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=stage_id) + ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: group_oid}) + assert sai_stage in ctx.asic_eni_table[eni_key] + assert ctx.asic_eni_table[eni_key][sai_stage] == group_oid + def test_acl_flow(self, ctx): - pb = AclGroup() - pb.ip_version = IpVersion.IP_VERSION_IPV4 - ctx.create_acl_group(ACL_GROUP_1, pb) - pb = AclRule() - pb.priority = 1 - pb.action = Action.ACTION_PERMIT - pb.terminating = False - net = ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - net = ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.src_port.append(vr) - pb.dst_port.append(vr) - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, pb) + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + + ctx.create_acl_rule(ACL_GROUP_1,
ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] group1_id= ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] @@ -293,9 +368,18 @@ def test_acl_flow(self, ctx): assert group1_attr["SAI_DASH_ACL_GROUP_ATTR_IP_ADDR_FAMILY"] == "SAI_IP_ADDR_FAMILY_IPV4" # Create multiple rules - pb.priority = 2 - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, pb) - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_3, pb) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, + priority=2, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, + priority=2, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_3, + priority=3, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) @@ -306,31 +390,13 @@ def test_acl_flow(self, ctx): ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) def test_acl_group(self, ctx): - pb = AclGroup() - pb.ip_version = IpVersion.IP_VERSION_IPV6 - ctx.create_acl_group(ACL_GROUP_1, pb) - pb = AclRule() - pb.priority = 1 - pb.action = Action.ACTION_PERMIT - pb.terminating = False - net = 
ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - net = ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.src_port.append(vr) - pb.dst_port.append(vr) - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, pb) + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV6) + + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) # Remove group before removing its rule @@ -348,79 +414,39 @@ def test_empty_acl_group_binding(self, ctx): Verifies behavior when binding ACL groups """ eni_key = ctx.asic_eni_table.get_keys()[0] - sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_1) + sai_stage = get_sai_stage(outbound=True, v4=True, stage_num=ACL_STAGE_1) - pb = AclGroup() - pb.ip_version = IpVersion.IP_VERSION_IPV4 - ctx.create_acl_group(ACL_GROUP_1, pb) + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] - ctx.bind_acl_in(self.eni_name, ACL_STAGE_1, v4_group_id = ACL_GROUP_1) + ctx.bind_acl_out(self.eni_name, ACL_STAGE_1, v4_group_id = ACL_GROUP_1) time.sleep(3) # Binding should not happen yet because the ACL group is empty assert sai_stage not in ctx.asic_eni_table[eni_key] - pb = AclRule() - pb.priority = 1 - pb.action = Action.ACTION_PERMIT - pb.terminating = False - net = 
ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - net = ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.src_port.append(vr) - pb.dst_port.append(vr) - - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, pb) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + # Now that the group contains a rule, expect binding to occur ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: acl_group_key}) # Unbinding should occur immediately - ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + ctx.unbind_acl_out(self.eni_name, ACL_STAGE_1) ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: SAI_NULL_OID}) def test_acl_group_binding(self, ctx): eni_key = ctx.asic_eni_table.get_keys()[0] sai_stage = get_sai_stage(outbound=False, v4=True, stage_num=ACL_STAGE_2) - pb = AclGroup() - pb.ip_version = IpVersion.IP_VERSION_IPV4 - ctx.create_acl_group(ACL_GROUP_2, pb) + ctx.create_acl_group(ACL_GROUP_2, IpVersion.IP_VERSION_IPV4) acl_group_key = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] - pb = AclRule() - pb.priority = 1 - pb.action = Action.ACTION_PERMIT - pb.terminating = False - net = ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - net = 
ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.src_port.append(vr) - pb.dst_port.append(vr) - - ctx.create_acl_rule(ACL_GROUP_2, ACL_RULE_1, pb) + ctx.create_acl_rule(ACL_GROUP_2, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + ctx.bind_acl_in(self.eni_name, ACL_STAGE_2, v4_group_id = ACL_GROUP_2) # Binding should occurr immediately since we added a rule to the group prior to binding ctx.asic_eni_table.wait_for_field_match(key=eni_key, expected_fields={sai_stage: acl_group_key}) @@ -430,70 +456,28 @@ def test_acl_group_binding(self, ctx): def test_acl_rule(self, ctx): # Create acl rule before acl group - pb = AclRule() - pb.priority = 1 - pb.action = Action.ACTION_PERMIT - pb.terminating = False - net = ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - net = ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.src_addr.append(pfx) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.src_port.append(vr) - pb.dst_port.append(vr) - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, pb) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], 
dst_port=[PortRange(0,1)]) time.sleep(3) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=0) - pb = AclGroup() - pb.ip_version = IpVersion.IP_VERSION_IPV4 - ctx.create_acl_group(ACL_GROUP_1, pb) + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) # Create acl rule with nonexistent acl group, which should never get programmed to ASIC_DB - ctx.create_acl_rule("0", "0", pb) + ctx.create_acl_rule("0", "0", + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) time.sleep(3) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) -# # Create acl with invalid attribute -# ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, {"priority": "abc"}) -# time.sleep(3) -# ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) - -# # Create acl without some mandatory attributes at first -# ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, {"priority": "1", "action": "allow", "terminating": "false"}) -# time.sleep(3) -# ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1) - - pb = AclRule() - net = ipaddress.IPv4Network("192.168.0.1/32", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.dst_addr.append(pfx) - net = ipaddress.IPv4Network("192.168.1.2/30", False) - pfx = IpPrefix() - pfx.ip.ipv4 = socket.htonl(int(net.network_address)) - pfx.mask.ipv4 = socket.htonl(int(net.netmask)) - pb.dst_addr.append(pfx) - vr = ValueOrRange() - vr.range.min = 0 - vr.range.max = 1 - pb.dst_port.append(vr) - - # Expect the rule to be created only after all the mandatory attributes are added - ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, pb) - time.sleep(3) + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_2, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + 
src_addr=["192.168.0.1/32", "192.168.1.2/30"], dst_addr=["192.168.0.1/32", "192.168.1.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=2) ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) @@ -503,6 +487,292 @@ def test_acl_rule(self, ctx): ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=0) + @pytest.mark.parametrize("bind_group", [True, False]) + def test_prefix_single_tag(self, ctx, bind_group): + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.3.0/30"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1], dst_tag=[TAG_2], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes + + if bind_group: + self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) + + tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + time.sleep(3) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + if bind_group: + new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + assert new_group1_id != group1_id + self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + + assert 
prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes + + tag2_prefixes = {"192.168.2.0/30", "192.168.3.0/30"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + time.sleep(3) + + ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1) + rule1_id = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes + + if bind_group: + ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + + ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) + ctx.remove_acl_group(ACL_GROUP_1) + ctx.remove_prefix_tag(TAG_1) + ctx.remove_prefix_tag(TAG_2) + + @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_tags(self, ctx, bind_group): + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.1.0/30"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"} + ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + + # Create acl rule before acl group + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1, TAG_2], dst_tag=[TAG_2, TAG_3], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + assert 
prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes) + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes) + + if bind_group: + self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) + + tag2_prefixes = {"192.168.10.0/30", "192.168.11.0/30", "192.168.12.0/30"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + tag3_prefixes = {"3.13.0.0/16", "3.14.0.0/16", "4.14.4.0/24", "5.15.5.0/24"} + ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + + time.sleep(3) + + if bind_group: + new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + assert new_group1_id != group1_id + + self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes.union(tag2_prefixes) + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == tag2_prefixes.union(tag3_prefixes) + + if bind_group: + ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + + ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) + ctx.remove_acl_group(ACL_GROUP_1) + ctx.remove_prefix_tag(TAG_1) + ctx.remove_prefix_tag(TAG_2) + ctx.remove_prefix_tag(TAG_3) + + @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_tags_and_prefixes(self, ctx, bind_group): + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + tag2_prefixes = {"192.168.1.0/30", "192.168.2.0/30", "192.168.3.0/30"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"} + ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + + ctx.create_acl_group(ACL_GROUP_1, 
IpVersion.IP_VERSION_IPV4) + group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + + prefix_list = {"10.0.0.0/8", "11.1.1.0/24", "11.1.2.0/24"} + + # Create acl rule before acl group + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1, TAG_2, TAG_3], dst_addr=prefix_list, + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + super_set = set() + super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes) + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list + + if bind_group: + self.bind_acl_group(ctx, ACL_STAGE_1, ACL_GROUP_1, group1_id) + + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + tag2_prefixes = {"192.168.1.2/32", "192.168.2.2/32", "192.168.1.2/32"} + ctx.create_prefix_tag(TAG_2, IpVersion.IP_VERSION_IPV4, tag2_prefixes) + + tag3_prefixes = {"3.3.0.0/16", "3.4.0.0/16", "4.4.4.0/24", "5.5.5.0/24"} + ctx.create_prefix_tag(TAG_3, IpVersion.IP_VERSION_IPV4, tag3_prefixes) + + time.sleep(3) + + if bind_group: + new_group1_id = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + assert new_group1_id != group1_id + self.verify_group_is_bound_to_eni(ctx, ACL_STAGE_1, new_group1_id) + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + super_set = set() + super_set.update(tag1_prefixes, tag2_prefixes, tag3_prefixes) + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == super_set + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_DIP"]) == prefix_list + + if bind_group: + ctx.unbind_acl_in(self.eni_name, ACL_STAGE_1) + + 
ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) + ctx.remove_acl_group(ACL_GROUP_1) + ctx.remove_prefix_tag(TAG_1) + ctx.remove_prefix_tag(TAG_2) + ctx.remove_prefix_tag(TAG_3) + + @pytest.mark.parametrize("bind_group", [True, False]) + def test_multiple_groups_prefix_single_tag(self, ctx, bind_group): + groups = [ACL_GROUP_1, ACL_GROUP_2, ACL_GROUP_3] + stages = [ACL_STAGE_1, ACL_STAGE_2, ACL_STAGE_3] + + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + for group in groups: + ctx.create_acl_group(group, IpVersion.IP_VERSION_IPV4) + ctx.create_acl_rule(group, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + group_ids = ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3) + rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) + + for rid in rule_ids: + rule_attrs = ctx.asic_dash_acl_rule_table[rid] + assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + + if bind_group: + eni_stages = [] + eni_key = ctx.asic_eni_table.get_keys()[0] + for stage, group in zip(stages, groups): + ctx.bind_acl_in(self.eni_name, stage, group) + eni_stages.append(get_sai_stage(outbound=False, v4=True, stage_num=stage)) + + ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) + for stage in eni_stages: + assert ctx.asic_eni_table[eni_key][stage] in group_ids + + tag1_prefixes = {"1.1.2.0/24", "2.3.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + time.sleep(3) + + rule_ids = ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=3) + + for rid in rule_ids: + rule_attrs = ctx.asic_dash_acl_rule_table[rid] + assert prefix_list_to_set(rule_attrs["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + + if bind_group: + new_group_ids = 
ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=3) + + ctx.asic_eni_table.wait_for_fields(key=eni_key, expected_fields=eni_stages) + for stage in eni_stages: + assert ctx.asic_eni_table[eni_key][stage] in new_group_ids + + for stage in stages: + ctx.unbind_acl_in(self.eni_name, stage) + + for group in groups: + ctx.remove_acl_rule(group, ACL_RULE_1) + ctx.remove_acl_group(group) + + ctx.remove_prefix_tag(TAG_1) + ctx.remove_prefix_tag(TAG_2) + + def test_tag_remove(self, ctx): + tag1_prefixes = {"1.1.1.0/24", "2.2.0.0/16"} + ctx.create_prefix_tag(TAG_1, IpVersion.IP_VERSION_IPV4, tag1_prefixes) + + ctx.create_acl_group(ACL_GROUP_1, IpVersion.IP_VERSION_IPV4) + ctx.asic_dash_acl_group_table.wait_for_n_keys(num_keys=1)[0] + + # Create acl rule before acl group + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=1, action=Action.ACTION_PERMIT, terminating=False, + src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + + rule1_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule1_attr = ctx.asic_dash_acl_rule_table[rule1_id] + + assert prefix_list_to_set(rule1_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + + ctx.remove_prefix_tag(TAG_1) + time.sleep(1) + + ctx.create_acl_rule(ACL_GROUP_1, ACL_RULE_1, + priority=2, action=Action.ACTION_DENY, terminating=False, + src_tag=[TAG_1], dst_addr=["192.168.1.2/30", "192.168.2.2/30", "192.168.3.2/30"], + src_port=[PortRange(0,1)], dst_port=[PortRange(0,1)]) + + rule2_id= ctx.asic_dash_acl_rule_table.wait_for_n_keys(num_keys=1)[0] + rule2_attr = ctx.asic_dash_acl_rule_table[rule2_id] + + assert prefix_list_to_set(rule2_attr["SAI_DASH_ACL_RULE_ATTR_SIP"]) == tag1_prefixes + + ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_1) + ctx.remove_acl_rule(ACL_GROUP_1, ACL_RULE_2) + ctx.remove_acl_group(ACL_GROUP_1) + ctx.remove_prefix_tag(TAG_1) + ctx.remove_prefix_tag(TAG_2) + # Add Dummy always-pass test at end as 
workaroud # for issue when Flaky fail on final test it invokes module tear-down # before retrying diff --git a/tests/test_fabric_port.py b/tests/test_fabric_port.py new file mode 100644 index 00000000000..a7ad9958b01 --- /dev/null +++ b/tests/test_fabric_port.py @@ -0,0 +1,48 @@ +from swsscommon import swsscommon +from dvslib.dvs_database import DVSDatabase + + +class TestVirtualChassis(object): + def test_voq_switch_fabric_link(self, vst): + """Test fabric link manual isolation commands in VOQ switch. + + By issuing config fabric port isolation command, the value + of isolateStatus field in config_db get changed. This test validates appl_db + updates of a fabric link isolateStatus as the value in config_db changed. + """ + + dvss = vst.dvss + for name in dvss.keys(): + dvs = dvss[name] + # Get the config info + config_db = dvs.get_config_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + + cfg_switch_type = metatbl.get("switch_type") + if cfg_switch_type == "fabric": + + # get config_db information + cdb = dvs.get_config_db() + + # set config_db to isolateStatus: True + cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) + cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "True"}) + + # check if appl_db value changes to isolateStatus: True + adb = dvs.get_app_db() + adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "True"}) + + # cleanup + cdb.update_entry("FABRIC_PORT", "Fabric1", {"isolateStatus": "False"}) + cdb.wait_for_field_match("FABRIC_PORT", "Fabric1", {"isolateStatus": "False"}) + adb.wait_for_field_match("FABRIC_PORT_TABLE", "Fabric1", {"isolateStatus": "False"}) + else: + print( "We do not check switch type:", cfg_switch_type ) + + +# Add Dummy always-pass test at end as workaroud +# for issue when Flaky fail on final test it invokes module tear-down before retrying +def test_nonflaky_dummy(): + pass + + diff --git a/tests/test_hash.py b/tests/test_hash.py index 
9c08aabb656..b84dd91eaf9 100644 --- a/tests/test_hash.py +++ b/tests/test_hash.py @@ -32,6 +32,15 @@ "ETHERTYPE", "IN_PORT" ] +HASH_ALGORITHM = [ + "CRC", + "XOR", + "RANDOM", + "CRC_32LO", + "CRC_32HI", + "CRC_CCITT", + "CRC_XOR" +] SAI_HASH_FIELD_LIST = [ "SAI_NATIVE_HASH_FIELD_DST_MAC", @@ -59,9 +68,19 @@ "SAI_NATIVE_HASH_FIELD_ETHERTYPE", "SAI_NATIVE_HASH_FIELD_IN_PORT" ] +SAI_HASH_ALGORITHM = [ + "SAI_HASH_ALGORITHM_CRC", + "SAI_HASH_ALGORITHM_XOR", + "SAI_HASH_ALGORITHM_RANDOM", + "SAI_HASH_ALGORITHM_CRC_32LO", + "SAI_HASH_ALGORITHM_CRC_32HI", + "SAI_HASH_ALGORITHM_CRC_CCITT", + "SAI_HASH_ALGORITHM_CRC_XOR" +] @pytest.mark.usefixtures("dvs_hash_manager") +@pytest.mark.usefixtures("dvs_switch_manager") class TestHashBasicFlows: @pytest.fixture(scope="class") def hashData(self, dvs_hash_manager): @@ -83,6 +102,25 @@ def hashData(self, dvs_hash_manager): hashlogger.info("Deinitialize HASH data") + @pytest.fixture(scope="class") + def switchData(self, dvs_switch_manager): + hashlogger.info("Initialize SWITCH data") + + hashlogger.info("Verify SWITCH count") + self.dvs_switch.verify_switch_count(0) + + hashlogger.info("Get SWITCH id") + switchIdList = self.dvs_switch.get_switch_ids() + + # Assumption: VS has only one SWITCH object + meta_dict = { + "id": switchIdList[0] + } + + yield meta_dict + + hashlogger.info("Deinitialize SWITCH data") + @pytest.mark.parametrize( "hash,field", [ pytest.param( @@ -167,6 +205,86 @@ def test_HashDefaultSwitchGlobalConfiguration(self, hash, field, testlog, hashDa sai_qualifiers=sai_attr_dict ) + @pytest.mark.parametrize( + "algorithm,attr,field", [ + pytest.param( + "ecmp", + "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM", + "ecmp_hash_algorithm", + id="ecmp-hash-algorithm" + ), + pytest.param( + "lag", + "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM", + "lag_hash_algorithm", + id="lag-hash-algorithm" + ) + ] + ) + @pytest.mark.parametrize( + "value", HASH_ALGORITHM + ) + def test_HashAlgorithmSwitchGlobalConfiguration(self, 
algorithm, attr, field, value, testlog, switchData): + attr_dict = { + field: value + } + + hashlogger.info("Update {} hash algorithm".format(algorithm.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + switchId = switchData["id"] + sai_attr_dict = { + attr: SAI_HASH_ALGORITHM[HASH_ALGORITHM.index(value)] + } + + hashlogger.info("Validate {} hash algorithm".format(algorithm.upper())) + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + + @pytest.mark.parametrize( + "algorithm,attr,field", [ + pytest.param( + "ecmp", + "SAI_SWITCH_ATTR_ECMP_DEFAULT_HASH_ALGORITHM", + "ecmp_hash_algorithm", + id="ecmp-hash-algorithm" + ), + pytest.param( + "lag", + "SAI_SWITCH_ATTR_LAG_DEFAULT_HASH_ALGORITHM", + "lag_hash_algorithm", + id="lag-hash-algorithm" + ) + ] + ) + @pytest.mark.parametrize( + "value", [ "CRC" ] + ) + def test_HashDefaultAlgorithmSwitchGlobalConfiguration(self, algorithm, attr, field, value, testlog, switchData): + attr_dict = { + field: value + } + + hashlogger.info("Update {} hash algorithm".format(algorithm.upper())) + self.dvs_hash.update_switch_hash( + qualifiers=attr_dict + ) + + switchId = switchData["id"] + sai_attr_dict = { + attr: SAI_HASH_ALGORITHM[HASH_ALGORITHM.index(value)] + } + + hashlogger.info("Validate {} hash algorithm".format(algorithm.upper())) + self.dvs_switch.verify_switch( + sai_switch_id=switchId, + sai_qualifiers=sai_attr_dict + ) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it invokes module tear-down before retrying diff --git a/tests/test_mux.py b/tests/test_mux.py index 8681e05fa38..6528fb0a2f0 100644 --- a/tests/test_mux.py +++ b/tests/test_mux.py @@ -542,7 +542,7 @@ def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, ma self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) # move neighbor 1 back - self.add_neighbor(dvs, neighbors[0], macs[i]) + self.add_neighbor(dvs, 
neighbors[0], macs[0]) print("Testing fdb update in %s, %s for %s" % (start[0], start[1], neighbors[1])) # move neighbor 2 @@ -555,7 +555,7 @@ def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, ma self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) # move neighbor 2 back - self.add_neighbor(dvs, neighbors[0], macs[i]) + self.add_neighbor(dvs, neighbors[1], macs[1]) self.del_route(dvs, route) @@ -584,14 +584,14 @@ def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, ma print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) else: self.check_route_nexthop(dvs_route, asicdb, route, neighbors[toggle_index]) else: print("setting %s to %s" % (mux_ports[toggle_index], ACTIVE)) self.set_mux_state(appdb, mux_ports[toggle_index], ACTIVE) if start[keep_index] == ACTIVE: - self.check_route_nexthop(dvs_route, asicdb, route, neighbors[keep_index]) + self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) else: self.check_route_nexthop(dvs_route, asicdb, route, neighbors[toggle_index]) @@ -603,8 +603,39 @@ def multi_nexthop_test(self, dvs, dvs_route, asicdb, appdb, route, neighbors, ma self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) self.del_route(dvs, route) + # Check route_updates + self.add_neighbor(dvs, self.SERV3_IPV4, macs[2]) + print("Testing route updates") + self.add_route(dvs, route, neighbors) + print("setting states to active") + for port in mux_ports: + self.set_mux_state(appdb, port, "active") + self.set_mux_state(appdb, "Ethernet4", "active") + + print("triggering another route update") + # add new neighbor to route to force route update + self.add_route(dvs, route, [neighbors[0], self.SERV3_IPV4]) + 
self.check_route_nexthop(dvs_route, asicdb, route, neighbors[0]) + self.del_route(dvs,route) + + time.sleep(2) + + self.add_route(dvs, route, neighbors) + print("setting states to standby") + for port in mux_ports: + self.set_mux_state(appdb, port, "standby") + self.set_mux_state(appdb, "Ethernet4", "standby") + + print("triggering another route update") + # add new neighbor to route to force route update + self.add_route(dvs, route, [neighbors[0], self.SERV3_IPV4]) + self.add_route(dvs, route, neighbors) + self.check_route_nexthop(dvs_route, asicdb, route, tunnel_nh_id, True) + self.del_route(dvs,route) + for neighbor in neighbors: self.del_neighbor(dvs, neighbor) + self.del_neighbor(dvs, self.SERV3_IPV4) print("Testing add/remove of neighbors") for start in starting_states: @@ -1474,7 +1505,7 @@ def test_NH(self, dvs, dvs_route, intf_fdb_map, setup, setup_mux_cable, def test_multi_nexthop(self, dvs, dvs_route, intf_fdb_map, neighbor_cleanup, testlog): appdb = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0) asicdb = dvs.get_asic_db() - macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"]] + macs = [intf_fdb_map["Ethernet0"], intf_fdb_map["Ethernet4"], intf_fdb_map["Ethernet8"]] self.create_and_test_multi_nexthop_routes(dvs, dvs_route, appdb, macs, asicdb) diff --git a/tests/test_srv6.py b/tests/test_srv6.py index 68bee80a52b..3ce19421b02 100644 --- a/tests/test_srv6.py +++ b/tests/test_srv6.py @@ -36,6 +36,29 @@ def create_vrf(self, vrf_name): def remove_vrf(self, vrf_name): self.cdb.delete_entry("VRF", vrf_name) + def add_ip_address(self, interface, ip): + self.cdb.create_entry("INTERFACE", interface + "|" + ip, {"NULL": "NULL"}) + + def remove_ip_address(self, interface, ip): + self.cdb.delete_entry("INTERFACE", interface + "|" + ip) + + def add_neighbor(self, interface, ip, mac, family): + table = "ASIC_STATE:SAI_OBJECT_TYPE_NEIGHBOR_ENTRY" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + tbl = 
swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + fvs = swsscommon.FieldValuePairs([("neigh", mac), + ("family", family)]) + tbl.set(interface + ":" + ip, fvs) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_neighbor(self, interface, ip): + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "NEIGH_TABLE") + tbl._del(interface + ":" + ip) + time.sleep(1) + def create_mysid(self, mysid, fvs): table = "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY" existed_entries = get_exist_entries(self.adb.db_connection, table) @@ -50,6 +73,43 @@ def remove_mysid(self, mysid): tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_MY_SID_TABLE") tbl._del(mysid) + def create_l3_intf(self, interface, vrf_name): + table = "ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE" + existed_entries = get_exist_entries(self.adb.db_connection, table) + + if len(vrf_name) == 0: + self.cdb.create_entry("INTERFACE", interface, {"NULL": "NULL"}) + else: + self.cdb.create_entry("INTERFACE", interface, {"vrf_name": vrf_name}) + + self.adb.wait_for_n_keys(table, len(existed_entries) + 1) + return get_created_entry(self.adb.db_connection, table, existed_entries) + + def remove_l3_intf(self, interface): + self.cdb.delete_entry("INTERFACE", interface) + + def get_nexthop_id(self, ip_address): + next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for next_hop_entry in next_hop_entries: + (status, fvs) = tbl.get(next_hop_entry) + + assert status == True + assert len(fvs) == 3 + + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_IP" and fv[1] == ip_address: + return next_hop_entry + + return None + + def set_interface_status(self, dvs, interface, admin_status): + tbl_name = "PORT" + tbl = swsscommon.Table(self.cdb.db_connection, tbl_name) + 
fvs = swsscommon.FieldValuePairs([("admin_status", "up")]) + tbl.set(interface, fvs) + time.sleep(1) + def test_mysid(self, dvs, testlog): self.setup_db(dvs) @@ -58,6 +118,12 @@ def test_mysid(self, dvs, testlog): mysid2='16:8:8:8:baba:2001:20::' mysid3='16:8:8:8:fcbb:bb01:800::' mysid4='16:8:8:8:baba:2001:40::' + mysid5='32:16:16:0:fc00:0:1:e000::' + mysid6='32:16:16:0:fc00:0:1:e001::' + mysid7='32:16:16:0:fc00:0:1:e002::' + mysid8='32:16:16:0:fc00:0:1:e003::' + mysid9='32:16:16:0:fc00:0:1:e004::' + mysid10='32:16:16:0:fc00:0:1:e005::' # create MySID END fvs = swsscommon.FieldValuePairs([('action', 'end')]) @@ -124,15 +190,263 @@ def test_mysid(self, dvs, testlog): elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DT4" + # create interface + self.create_l3_intf("Ethernet104", "") + + # Assign IP to interface + self.add_ip_address("Ethernet104", "2001::2/126") + self.add_ip_address("Ethernet104", "192.0.2.2/30") + + # create neighbor + self.add_neighbor("Ethernet104", "2001::1", "00:00:00:01:02:04", "IPv6") + self.add_neighbor("Ethernet104", "192.0.2.1", "00:00:00:01:02:05", "IPv4") + + # get nexthops + next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + assert len(next_hop_entries) == 2 + + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + for next_hop_entry in next_hop_entries: + (status, fvs) = tbl.get(next_hop_entry) + + assert status == True + assert len(fvs) == 3 + + for fv in fvs: + if fv[0] == "SAI_NEXT_HOP_ATTR_IP": + if fv[1] == "2001::1": + next_hop_ipv6_id = next_hop_entry + elif fv[1] == "192.0.2.1": + next_hop_ipv4_id = next_hop_entry + else: + assert False, "Nexthop IP %s not expected" % fv[1] + + assert next_hop_ipv6_id is not None + assert next_hop_ipv4_id is not None + + # create MySID END.X + fvs = swsscommon.FieldValuePairs([('action', 'end.x'), ('adj', '2001::1')]) + key = self.create_mysid(mysid5, 
fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e000::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX4 + fvs = swsscommon.FieldValuePairs([('action', 'end.dx4'), ('adj', '192.0.2.1')]) + key = self.create_mysid(mysid6, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e001::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv4_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX6 + fvs = swsscommon.FieldValuePairs([('action', 'end.dx6'), ('adj', '2001::1')]) + key = self.create_mysid(mysid7, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e002::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6" + elif fv[0] == 
"SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID uA + fvs = swsscommon.FieldValuePairs([('action', 'ua'), ('adj', '2001::1')]) + key = self.create_mysid(mysid8, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e003::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_UA" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID uDX4 + fvs = swsscommon.FieldValuePairs([('action', 'udx4'), ('adj', '192.0.2.1')]) + key = self.create_mysid(mysid9, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e004::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv4_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX4" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # create MySID END.DX6 + fvs = swsscommon.FieldValuePairs([('action', 'udx6'), ('adj', '2001::1')]) + key = self.create_mysid(mysid10, fvs) + + # check ASIC MySID database + mysid = json.loads(key) + assert mysid["sid"] == "fc00:0:1:e005::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(key) + assert status 
== True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_ipv6_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_DX6" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + # delete MySID self.remove_mysid(mysid1) self.remove_mysid(mysid2) self.remove_mysid(mysid3) self.remove_mysid(mysid4) + self.remove_mysid(mysid5) + self.remove_mysid(mysid6) + self.remove_mysid(mysid7) + self.remove_mysid(mysid8) + self.remove_mysid(mysid9) + self.remove_mysid(mysid10) # remove vrf self.remove_vrf("VrfDt46") + # remove nexthop + self.remove_neighbor("Ethernet104", "2001::1") + self.remove_neighbor("Ethernet104", "192.0.2.1") + + # Reemove IP from interface + self.remove_ip_address("Ethernet104", "2001::2/126") + self.remove_ip_address("Ethernet104", "192.0.2.2/30") + + self.remove_l3_intf("Ethernet104") + + def test_mysid_l3adj(self, dvs, testlog): + self.setup_db(dvs) + + # create MySID entries + mysid1='32:16:16:0:fc00:0:1:e000::' + + # create interface + self.create_l3_intf("Ethernet104", "") + + # assign IP to interface + self.add_ip_address("Ethernet104", "2001::2/64") + + time.sleep(3) + + # bring up Ethernet104 + self.set_interface_status(dvs, "Ethernet104", "up") + + time.sleep(3) + + # save the initial number of entries in MySID table + initial_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + + # save the initial number of entries in Nexthop table + initial_next_hop_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP") + + # create MySID END.X, neighbor does not exist yet + fvs = swsscommon.FieldValuePairs([('action', 'end.x'), ('adj', '2001::1')]) + tbl = swsscommon.ProducerStateTable(self.pdb.db_connection, "SRV6_MY_SID_TABLE") + tbl.set(mysid1, fvs) + + time.sleep(2) + + # check the 
current number of entries in MySID table + # since the neighbor does not exist yet, we expect the SID has not been installed (i.e., we + # expect the same number of MySID entries as before) + exist_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + assert len(exist_my_sid_entries) == len(initial_my_sid_entries) + + # now, let's create the neighbor + self.add_neighbor("Ethernet104", "2001::1", "00:00:00:01:02:04", "IPv6") + + # verify that the nexthop is created in the ASIC (i.e., we have the previous number of next hop entries + 1) + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_next_hop_entries) + 1) + + # get the new nexthop and nexthop ID, which will be used later to verify the MySID entry + next_hop_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", initial_next_hop_entries) + assert next_hop_entry is not None + next_hop_id = self.get_nexthop_id("2001::1") + assert next_hop_id is not None + + # now the neighbor has been created in the ASIC, we expect the MySID entry to be created in the ASIC as well + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", len(initial_my_sid_entries) + 1) + my_sid_entry = get_created_entry(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY", initial_my_sid_entries) + assert my_sid_entry is not None + + # check ASIC MySID database and verify the SID + mysid = json.loads(my_sid_entry) + assert mysid is not None + assert mysid["sid"] == "fc00:0:1:e000::" + tbl = swsscommon.Table(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + (status, fvs) = tbl.get(my_sid_entry) + assert status == True + for fv in fvs: + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_NEXT_HOP_ID": + assert fv[1] == next_hop_id + if fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR": + assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_X" + elif fv[0] == "SAI_MY_SID_ENTRY_ATTR_ENDPOINT_BEHAVIOR_FLAVOR": + 
assert fv[1] == "SAI_MY_SID_ENTRY_ENDPOINT_BEHAVIOR_FLAVOR_PSP_AND_USD" + + # remove neighbor + self.remove_neighbor("Ethernet104", "2001::1") + + # delete MySID + self.remove_mysid(mysid1) + + # # verify that the nexthop has been removed from the ASIC + self.adb.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_NEXT_HOP", len(initial_next_hop_entries)) + + # check the current number of entries in MySID table + # since the MySID has been removed, we expect the SID has been removed from the ASIC as well + exist_my_sid_entries = get_exist_entries(self.adb.db_connection, "ASIC_STATE:SAI_OBJECT_TYPE_MY_SID_ENTRY") + assert len(exist_my_sid_entries) == len(initial_my_sid_entries) + + # remove IP from interface + self.remove_ip_address("Ethernet104", "2001::2/64") + + # remove interface + self.remove_l3_intf("Ethernet104") + class TestSrv6(object): def setup_db(self, dvs): self.pdb = dvs.get_app_db() diff --git a/tests/test_virtual_chassis.py b/tests/test_virtual_chassis.py index 6f95944c79f..c92ed88c40d 100644 --- a/tests/test_virtual_chassis.py +++ b/tests/test_virtual_chassis.py @@ -893,10 +893,8 @@ def test_chassis_add_remove_ports(self, vct): config_db.delete_entry('PORT', port) app_db.wait_for_deleted_entry('PORT_TABLE', port) num = asic_db.wait_for_n_keys("ASIC_STATE:SAI_OBJECT_TYPE_PORT", - num_ports-1) - assert len(num) == num_ports-1 - - marker = dvs.add_log_marker() + num_ports) + assert len(num) == num_ports # Create port config_db.update_entry("PORT", port, port_info) @@ -906,9 +904,8 @@ def test_chassis_add_remove_ports(self, vct): assert len(num) == num_ports # Check that we see the logs for removing default vlan - matching_log = "removeDefaultVlanMembers: Remove 0 VLAN members from default VLAN" _, logSeen = dvs.runcmd( [ "sh", "-c", - "awk '/{}/,ENDFILE {{print;}}' /var/log/syslog | grep '{}' | wc -l".format( marker, matching_log ) ] ) + "awk STARTFILE/ENDFILE /var/log/syslog | grep 'removeDefaultVlanMembers: Remove 32 VLAN members from default VLAN' | wc -l"] 
) assert logSeen.strip() == "1" buffer_model.disable_dynamic_buffer(dvs.get_config_db(), dvs.runcmd) @@ -940,6 +937,46 @@ def test_voq_egress_queue_counter(self, vct): flex_db = dvs.get_flex_db() flex_db.wait_for_n_keys("FLEX_COUNTER_TABLE:QUEUE_STAT_COUNTER", num_queues_to_be_polled) + def test_chassis_wred_profile_on_system_ports(self, vct): + """Test whether wred profile is applied on system ports in VoQ chassis. + """ + dvss = vct.dvss + for name in dvss.keys(): + dvs = dvss[name] + + config_db = dvs.get_config_db() + app_db = dvs.get_app_db() + asic_db = dvs.get_asic_db() + metatbl = config_db.get_entry("DEVICE_METADATA", "localhost") + cfg_switch_type = metatbl.get("switch_type") + + if cfg_switch_type == "voq": + # Get all the keys from SYTEM_PORT table and check whether wred_profile is applied properly + system_ports = config_db.get_keys('SYSTEM_PORT') + + for key in system_ports: + queue3 = key + '|' + '3' + queue_entry = config_db.get_entry('QUEUE', queue3) + wred_profile = queue_entry['wred_profile'] + if wred_profile != 'AZURE_LOSSLESS': + print("WRED profile not applied on queue3 on system port %s", key) + assert wred_profile == 'AZURE_LOSSLESS' + + queue4 = key + '|' + '4' + queue_entry = config_db.get_entry('QUEUE', queue4) + wred_profile = queue_entry['wred_profile'] + if wred_profile != 'AZURE_LOSSLESS': + print("WRED profile not applied on queue4 on system port %s", key) + assert wred_profile == 'AZURE_LOSSLESS' + + # Check that we see the logs for applying WRED_PROFILE on all system ports + matching_log = "SAI_QUEUE_ATTR_WRED_PROFILE_ID" + _, logSeen = dvs.runcmd([ "sh", "-c", + "awk STARTFILE/ENDFILE /var/log/swss/sairedis.rec | grep SAI_QUEUE_ATTR_WRED_PROFILE_ID | wc -l"]) + + # Total number of logs = (No of system ports * No of lossless priorities) - No of lossless priorities for CPU ports + assert logSeen.strip() == str(len(system_ports)*2 - 2) + # Add Dummy always-pass test at end as workaroud # for issue when Flaky fail on final test it 
invokes module tear-down before retrying def test_nonflaky_dummy(): diff --git a/tests/test_warm_reboot.py b/tests/test_warm_reboot.py index b2edc425878..57632070208 100644 --- a/tests/test_warm_reboot.py +++ b/tests/test_warm_reboot.py @@ -60,6 +60,19 @@ def check_port_oper_status(appl_db, port_name, state): break assert oper_status == state +def check_port_host_tx_ready_status(state_db, port_name, status): + portTable = swsscommon.Table(state_db, swsscommon.STATE_PORT_TABLE_NAME) + (status, fvs) = portTable.get(port_name) + + assert status == True + + host_tx_ready = "N/A" + for v in fvs: + if v[0] == "host_tx_ready": + host_tx_ready = v[1] + break + assert host_tx_ready == status + # function to check the restore count incremented by 1 for a single process def swss_app_check_RestoreCount_single(state_db, restore_count, name): warmtbl = swsscommon.Table(state_db, swsscommon.STATE_WARM_RESTART_TABLE_NAME) @@ -294,6 +307,10 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet16", "up") check_port_oper_status(appl_db, "Ethernet20", "up") + # Ethernet port host_tx_ready status should be "true" + check_port_host_tx_ready_status(state_db, "Ethernet16", "true") + check_port_host_tx_ready_status(state_db, "Ethernet20", "true") + # Ping should work between servers via vs vlan interfaces ping_stats = dvs.servers[4].runcmd("ping -c 1 11.0.0.10") time.sleep(1) @@ -337,6 +354,9 @@ def test_PortSyncdWarmRestart(self, dvs, testlog): check_port_oper_status(appl_db, "Ethernet20", "up") check_port_oper_status(appl_db, "Ethernet24", "down") + check_port_host_tx_ready_status(state_db, "Ethernet16", "true") + check_port_host_tx_ready_status(state_db, "Ethernet20", "true") + check_port_host_tx_ready_status(state_db, "Ethernet24", "false") swss_app_check_RestoreCount_single(state_db, restore_count, "portsyncd") @@ -2397,7 +2417,7 @@ def test_TunnelMgrdWarmRestart(self, dvs): "ecn_mode": "standard", "ttl_mode": "pipe" } - + pubsub = 
dvs.SubscribeAppDbObject(tunnel_table) dvs.runcmd("config warm_restart enable swss") diff --git a/tests/virtual_chassis/1/default_config.json b/tests/virtual_chassis/1/default_config.json index 3e0c3fce72f..88769c9ce62 100644 --- a/tests/virtual_chassis/1/default_config.json +++ b/tests/virtual_chassis/1/default_config.json @@ -27,672 +27,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": { + "lc1|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - "Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + 
"lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - "Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + "lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": "0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": 
"23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "24", "switch_id": "0", "core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - "Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", "core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", 
"core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", "core_index": "0", "core_port_index": "4" }, - "Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", "core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { + "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + 
"lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - "Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + "lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": "2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", 
"system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "61", "switch_id": "2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - "Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", "core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", 
"core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", "core_index": "0", "core_port_index": "9" }, - "Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", "core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - "Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + 
"lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - "Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + "lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", "core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4", diff --git a/tests/virtual_chassis/2/default_config.json b/tests/virtual_chassis/2/default_config.json index 
d306c30ea37..2556706ce0a 100644 --- a/tests/virtual_chassis/2/default_config.json +++ b/tests/virtual_chassis/2/default_config.json @@ -22,672 +22,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": { + "lc1|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - "Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + "lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - "Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": 
"40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + "lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": "0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "24", "switch_id": "0", 
"core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - "Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", "core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", "core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", "core_index": "0", "core_port_index": "4" }, - 
"Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", "core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { + "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + "lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - "Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { 
"speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + "lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": "2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "61", "switch_id": 
"2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - "Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", "core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", "core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", "core_index": "0", "core_port_index": "9" }, - 
"Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", "core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - "Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + "lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - "Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { 
"speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + "lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", "core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4", diff --git a/tests/virtual_chassis/3/default_config.json b/tests/virtual_chassis/3/default_config.json index 4579733d353..7b747d5e548 100644 --- a/tests/virtual_chassis/3/default_config.json +++ b/tests/virtual_chassis/3/default_config.json @@ -22,672 +22,672 @@ } }, "SYSTEM_PORT": { - "Linecard1|Ethernet0": 
{ + "lc1|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "1", "switch_id": "0", "core_index": "0", "core_port_index": "1" }, - "Linecard1|Ethernet4": { + "lc1|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "2", "switch_id": "0", "core_index": "0", "core_port_index": "2" }, - "Linecard1|Ethernet8": { + "lc1|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "3", "switch_id": "0", "core_index": "0", "core_port_index": "3" }, - "Linecard1|Ethernet12": { + "lc1|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "4", "switch_id": "0", "core_index": "0", "core_port_index": "4" }, - "Linecard1|Ethernet16": { + "lc1|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "5", "switch_id": "0", "core_index": "0", "core_port_index": "5" }, - "Linecard1|Ethernet20": { + "lc1|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "6", "switch_id": "0", "core_index": "0", "core_port_index": "6" }, - "Linecard1|Ethernet24": { + "lc1|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "7", "switch_id": "0", "core_index": "0", "core_port_index": "7" }, - "Linecard1|Ethernet28": { + "lc1|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "8", "switch_id": "0", "core_index": "0", "core_port_index": "8" }, - "Linecard1|Ethernet32": { + "lc1|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "9", "switch_id": "0", "core_index": "0", "core_port_index": "9" }, - "Linecard1|Ethernet36": { + "lc1|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "10", "switch_id": "0", "core_index": "0", "core_port_index": "10" }, - "Linecard1|Ethernet40": { + "lc1|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "11", "switch_id": "0", "core_index": "0", "core_port_index": "11" }, - "Linecard1|Ethernet44": { + "lc1|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "12", "switch_id": "0", "core_index": "0", "core_port_index": "12" }, - "Linecard1|Ethernet48": { + "lc1|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "13", "switch_id": 
"0", "core_index": "0", "core_port_index": "13" }, - "Linecard1|Ethernet52": { + "lc1|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "14", "switch_id": "0", "core_index": "0", "core_port_index": "14" }, - "Linecard1|Ethernet56": { + "lc1|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "15", "switch_id": "0", "core_index": "0", "core_port_index": "15" }, - "Linecard1|Ethernet60": { + "lc1|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "16", "switch_id": "0", "core_index": "0", "core_port_index": "16" }, - "Linecard1|Ethernet64": { + "lc1|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "17", "switch_id": "0", "core_index": "1", "core_port_index": "1" }, - "Linecard1|Ethernet68": { + "lc1|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "18", "switch_id": "0", "core_index": "1", "core_port_index": "2" }, - "Linecard1|Ethernet72": { + "lc1|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "19", "switch_id": "0", "core_index": "1", "core_port_index": "3" }, - "Linecard1|Ethernet76": { + "lc1|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "20", "switch_id": "0", "core_index": "1", "core_port_index": "4" }, - "Linecard1|Ethernet80": { + "lc1|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "21", "switch_id": "0", "core_index": "1", "core_port_index": "5" }, - "Linecard1|Ethernet84": { + "lc1|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "22", "switch_id": "0", "core_index": "1", "core_port_index": "6" }, - "Linecard1|Ethernet88": { + "lc1|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "23", "switch_id": "0", "core_index": "1", "core_port_index": "7" }, - "Linecard1|Ethernet92": { + "lc1|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "24", "switch_id": "0", "core_index": "1", "core_port_index": "8" }, - "Linecard1|Ethernet96": { + "lc1|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "25", "switch_id": "0", "core_index": "1", "core_port_index": "9" }, - 
"Linecard1|Ethernet100": { + "lc1|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "26", "switch_id": "0", "core_index": "1", "core_port_index": "10" }, - "Linecard1|Ethernet104": { + "lc1|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "27", "switch_id": "0", "core_index": "1", "core_port_index": "11" }, - "Linecard1|Ethernet108": { + "lc1|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "28", "switch_id": "0", "core_index": "1", "core_port_index": "12" }, - "Linecard1|Ethernet112": { + "lc1|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "29", "switch_id": "0", "core_index": "1", "core_port_index": "13" }, - "Linecard1|Ethernet116": { + "lc1|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "30", "switch_id": "0", "core_index": "1", "core_port_index": "14" }, - "Linecard1|Ethernet120": { + "lc1|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "31", "switch_id": "0", "core_index": "1", "core_port_index": "15" }, - "Linecard1|Ethernet124": { + "lc1|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "32", "switch_id": "0", "core_index": "1", "core_port_index": "16" }, - "Linecard2|Ethernet0": { + "lc2|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "33", "switch_id": "2", "core_index": "0", "core_port_index": "1" }, - "Linecard2|Ethernet4": { + "lc2|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "34", "switch_id": "2", "core_index": "0", "core_port_index": "2" }, - "Linecard2|Ethernet8": { + "lc2|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "35", "switch_id": "2", "core_index": "0", "core_port_index": "3" }, - "Linecard2|Ethernet12": { + "lc2|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "36", "switch_id": "2", "core_index": "0", "core_port_index": "4" }, - "Linecard2|Ethernet16": { + "lc2|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "37", "switch_id": "2", "core_index": "0", "core_port_index": "5" }, - "Linecard2|Ethernet20": { + "lc2|Asic0|Ethernet20": 
{ "speed": "40000", "system_port_id": "38", "switch_id": "2", "core_index": "0", "core_port_index": "6" }, - "Linecard2|Ethernet24": { + "lc2|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "39", "switch_id": "2", "core_index": "0", "core_port_index": "7" }, - "Linecard2|Ethernet28": { + "lc2|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "40", "switch_id": "2", "core_index": "0", "core_port_index": "8" }, - "Linecard2|Ethernet32": { + "lc2|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "41", "switch_id": "2", "core_index": "0", "core_port_index": "9" }, - "Linecard2|Ethernet36": { + "lc2|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "42", "switch_id": "2", "core_index": "0", "core_port_index": "10" }, - "Linecard2|Ethernet40": { + "lc2|Asic0|Ethernet40": { "speed": "40000", "system_port_id": "43", "switch_id": "2", "core_index": "0", "core_port_index": "11" }, - "Linecard2|Ethernet44": { + "lc2|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "44", "switch_id": "2", "core_index": "0", "core_port_index": "12" }, - "Linecard2|Ethernet48": { + "lc2|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "45", "switch_id": "2", "core_index": "0", "core_port_index": "13" }, - "Linecard2|Ethernet52": { + "lc2|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "46", "switch_id": "2", "core_index": "0", "core_port_index": "14" }, - "Linecard2|Ethernet56": { + "lc2|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "47", "switch_id": "2", "core_index": "0", "core_port_index": "15" }, - "Linecard2|Ethernet60": { + "lc2|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "48", "switch_id": "2", "core_index": "0", "core_port_index": "16" }, - "Linecard2|Ethernet64": { + "lc2|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "49", "switch_id": "2", "core_index": "1", "core_port_index": "1" }, - "Linecard2|Ethernet68": { + "lc2|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "50", "switch_id": 
"2", "core_index": "1", "core_port_index": "2" }, - "Linecard2|Ethernet72": { + "lc2|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "51", "switch_id": "2", "core_index": "1", "core_port_index": "3" }, - "Linecard2|Ethernet76": { + "lc2|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "52", "switch_id": "2", "core_index": "1", "core_port_index": "4" }, - "Linecard2|Ethernet80": { + "lc2|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "53", "switch_id": "2", "core_index": "1", "core_port_index": "5" }, - "Linecard2|Ethernet84": { + "lc2|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "54", "switch_id": "2", "core_index": "1", "core_port_index": "6" }, - "Linecard2|Ethernet88": { + "lc2|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "55", "switch_id": "2", "core_index": "1", "core_port_index": "7" }, - "Linecard2|Ethernet92": { + "lc2|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "56", "switch_id": "2", "core_index": "1", "core_port_index": "8" }, - "Linecard2|Ethernet96": { + "lc2|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "57", "switch_id": "2", "core_index": "1", "core_port_index": "9" }, - "Linecard2|Ethernet100": { + "lc2|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "58", "switch_id": "2", "core_index": "1", "core_port_index": "10" }, - "Linecard2|Ethernet104": { + "lc2|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "59", "switch_id": "2", "core_index": "1", "core_port_index": "11" }, - "Linecard2|Ethernet108": { + "lc2|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "60", "switch_id": "2", "core_index": "1", "core_port_index": "12" }, - "Linecard2|Ethernet112": { + "lc2|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "61", "switch_id": "2", "core_index": "1", "core_port_index": "13" }, - "Linecard2|Ethernet116": { + "lc2|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "62", "switch_id": "2", "core_index": "1", "core_port_index": "14" }, - 
"Linecard2|Ethernet120": { + "lc2|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "63", "switch_id": "2", "core_index": "1", "core_port_index": "15" }, - "Linecard2|Ethernet124": { + "lc2|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "64", "switch_id": "2", "core_index": "1", "core_port_index": "16" }, - "Linecard3|Ethernet0": { + "lc3|Asic0|Ethernet0": { "speed": "40000", "system_port_id": "65", "switch_id": "4", "core_index": "0", "core_port_index": "1" }, - "Linecard3|Ethernet4": { + "lc3|Asic0|Ethernet4": { "speed": "40000", "system_port_id": "66", "switch_id": "4", "core_index": "0", "core_port_index": "2" }, - "Linecard3|Ethernet8": { + "lc3|Asic0|Ethernet8": { "speed": "40000", "system_port_id": "67", "switch_id": "4", "core_index": "0", "core_port_index": "3" }, - "Linecard3|Ethernet12": { + "lc3|Asic0|Ethernet12": { "speed": "40000", "system_port_id": "68", "switch_id": "4", "core_index": "0", "core_port_index": "4" }, - "Linecard3|Ethernet16": { + "lc3|Asic0|Ethernet16": { "speed": "40000", "system_port_id": "69", "switch_id": "4", "core_index": "0", "core_port_index": "5" }, - "Linecard3|Ethernet20": { + "lc3|Asic0|Ethernet20": { "speed": "40000", "system_port_id": "70", "switch_id": "4", "core_index": "0", "core_port_index": "6" }, - "Linecard3|Ethernet24": { + "lc3|Asic0|Ethernet24": { "speed": "40000", "system_port_id": "71", "switch_id": "4", "core_index": "0", "core_port_index": "7" }, - "Linecard3|Ethernet28": { + "lc3|Asic0|Ethernet28": { "speed": "40000", "system_port_id": "72", "switch_id": "4", "core_index": "0", "core_port_index": "8" }, - "Linecard3|Ethernet32": { + "lc3|Asic0|Ethernet32": { "speed": "40000", "system_port_id": "73", "switch_id": "4", "core_index": "0", "core_port_index": "9" }, - "Linecard3|Ethernet36": { + "lc3|Asic0|Ethernet36": { "speed": "40000", "system_port_id": "74", "switch_id": "4", "core_index": "0", "core_port_index": "10" }, - "Linecard3|Ethernet40": { + "lc3|Asic0|Ethernet40": { "speed": 
"40000", "system_port_id": "75", "switch_id": "4", "core_index": "0", "core_port_index": "11" }, - "Linecard3|Ethernet44": { + "lc3|Asic0|Ethernet44": { "speed": "40000", "system_port_id": "76", "switch_id": "4", "core_index": "0", "core_port_index": "12" }, - "Linecard3|Ethernet48": { + "lc3|Asic0|Ethernet48": { "speed": "40000", "system_port_id": "77", "switch_id": "4", "core_index": "0", "core_port_index": "13" }, - "Linecard3|Ethernet52": { + "lc3|Asic0|Ethernet52": { "speed": "40000", "system_port_id": "78", "switch_id": "4", "core_index": "0", "core_port_index": "14" }, - "Linecard3|Ethernet56": { + "lc3|Asic0|Ethernet56": { "speed": "40000", "system_port_id": "79", "switch_id": "4", "core_index": "0", "core_port_index": "15" }, - "Linecard3|Ethernet60": { + "lc3|Asic0|Ethernet60": { "speed": "40000", "system_port_id": "80", "switch_id": "4", "core_index": "0", "core_port_index": "16" }, - "Linecard3|Ethernet64": { + "lc3|Asic0|Ethernet64": { "speed": "40000", "system_port_id": "81", "switch_id": "4", "core_index": "1", "core_port_index": "1" }, - "Linecard3|Ethernet68": { + "lc3|Asic0|Ethernet68": { "speed": "40000", "system_port_id": "82", "switch_id": "4", "core_index": "1", "core_port_index": "2" }, - "Linecard3|Ethernet72": { + "lc3|Asic0|Ethernet72": { "speed": "40000", "system_port_id": "83", "switch_id": "4", "core_index": "1", "core_port_index": "3" }, - "Linecard3|Ethernet76": { + "lc3|Asic0|Ethernet76": { "speed": "40000", "system_port_id": "84", "switch_id": "4", "core_index": "1", "core_port_index": "4" }, - "Linecard3|Ethernet80": { + "lc3|Asic0|Ethernet80": { "speed": "40000", "system_port_id": "85", "switch_id": "4", "core_index": "1", "core_port_index": "5" }, - "Linecard3|Ethernet84": { + "lc3|Asic0|Ethernet84": { "speed": "40000", "system_port_id": "86", "switch_id": "4", "core_index": "1", "core_port_index": "6" }, - "Linecard3|Ethernet88": { + "lc3|Asic0|Ethernet88": { "speed": "40000", "system_port_id": "87", "switch_id": "4", 
"core_index": "1", "core_port_index": "7" }, - "Linecard3|Ethernet92": { + "lc3|Asic0|Ethernet92": { "speed": "40000", "system_port_id": "88", "switch_id": "4", "core_index": "1", "core_port_index": "8" }, - "Linecard3|Ethernet96": { + "lc3|Asic0|Ethernet96": { "speed": "40000", "system_port_id": "89", "switch_id": "4", "core_index": "1", "core_port_index": "9" }, - "Linecard3|Ethernet100": { + "lc3|Asic0|Ethernet100": { "speed": "40000", "system_port_id": "90", "switch_id": "4", "core_index": "1", "core_port_index": "10" }, - "Linecard3|Ethernet104": { + "lc3|Asic0|Ethernet104": { "speed": "40000", "system_port_id": "91", "switch_id": "4", "core_index": "1", "core_port_index": "11" }, - "Linecard3|Ethernet108": { + "lc3|Asic0|Ethernet108": { "speed": "40000", "system_port_id": "92", "switch_id": "4", "core_index": "1", "core_port_index": "12" }, - "Linecard3|Ethernet112": { + "lc3|Asic0|Ethernet112": { "speed": "40000", "system_port_id": "93", "switch_id": "4", "core_index": "1", "core_port_index": "13" }, - "Linecard3|Ethernet116": { + "lc3|Asic0|Ethernet116": { "speed": "40000", "system_port_id": "94", "switch_id": "4", "core_index": "1", "core_port_index": "14" }, - "Linecard3|Ethernet120": { + "lc3|Asic0|Ethernet120": { "speed": "40000", "system_port_id": "95", "switch_id": "4", "core_index": "1", "core_port_index": "15" }, - "Linecard3|Ethernet124": { + "lc3|Asic0|Ethernet124": { "speed": "40000", "system_port_id": "96", "switch_id": "4",