Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
76 changes: 76 additions & 0 deletions orchagent/pfc_detect_broadcom.lua
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,59 @@ local rets = {}

redis.call('SELECT', counters_db)

-- Interpret a Redis string reply as a boolean: only the literal 'true' is true.
-- (A missing field comes back from HGET as boolean false, which also maps to false.)
local function parse_boolean(str)
    if str == 'true' then
        return true
    end
    return false
end

-- Interpret a Redis string reply as a number, defaulting to 0 when the field
-- is missing (HGET returned false) or the value is not numeric.
local function parse_number(str)
    local n = tonumber(str)
    if n == nil then
        n = 0
    end
    return n
end

-- Accumulate estimated PFC pause time for one priority on a port.
-- The whole interval since the previous poll is credited as paused time:
-- it is always added to the "recent" counter, and additionally to our own
-- estimated total counter, but only when the hardware (SAI) does not already
-- report a pause-duration stat for this priority.
local function updateTimePaused(port_key, prio, time_since_last_poll)
    local sai_total_field = 'SAI_PORT_STAT_PFC_' .. prio .. '_RX_PAUSE_DURATION_US'
    local recent_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIME_US'

    -- Recent counter: always estimate the full poll interval as paused
    local recent_us = parse_number(redis.call('HGET', port_key, recent_field))
    redis.call('HSET', port_key, recent_field, recent_us + time_since_last_poll)

    -- Total counter: maintain an estimate only when SAI offers no real stat
    -- (HGET yields boolean false for a missing field)
    if not redis.call('HGET', port_key, sai_total_field) then
        local est_total_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RX_PAUSE_DURATION_US'
        local total_us = parse_number(redis.call('HGET', port_key, est_total_field))
        redis.call('HSET', port_key, est_total_field, total_us + time_since_last_poll)
    end
end

-- Begin a fresh "recent pause" measurement window for one priority on a port:
-- record when the window started and zero its accumulated pause time.
-- NOTE(review): on the very first poll there is no saved poll timestamp, so the
-- caller passes the boolean false that HGET returns for a missing field; the
-- redis Lua bridge only accepts strings/integers as command arguments, so fall
-- back to 0 rather than erroring out the whole script.
local function restartRecentTime(port_key, prio, timestamp_last)
    local recent_pause_time_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIME_US'
    local recent_pause_timestamp_field = 'EST_PORT_STAT_PFC_' .. prio .. '_RECENT_PAUSE_TIMESTAMP'

    redis.call('HSET', port_key, recent_pause_timestamp_field, timestamp_last or 0)
    redis.call('HSET', port_key, recent_pause_time_field, 0)
end

-- Compute microseconds elapsed since the previous poll; used below to grow the
-- total and recent pause-time estimates. Falls back to the configured poll
-- interval on the very first poll, when no previous timestamp is stored.
local timestamp_field_last = 'PFCWD_POLL_TIMESTAMP_last'
local timestamp_last = redis.call('HGET', 'TIMESTAMP', timestamp_field_last)

-- Current wall-clock time in microseconds (TIME returns {seconds, microseconds})
local time = redis.call('TIME')
local timestamp_current = tonumber(time[1]) * 1000000 + tonumber(time[2])

-- Persist this poll's timestamp so the next run can compute its delta
local timestamp_string = tostring(timestamp_current)
redis.call('HSET', 'TIMESTAMP', timestamp_field_last, timestamp_string)

local time_since_last_poll
if timestamp_last == false then
    -- first poll: HGET found no stored timestamp, assume one poll interval
    time_since_last_poll = poll_time
else
    time_since_last_poll = timestamp_current - tonumber(timestamp_last)
end

-- Iterate through each queue
local n = table.getn(KEYS)
for i = n, 1, -1 do
Expand Down Expand Up @@ -86,6 +139,29 @@ for i = n, 1, -1 do
end
time_left = detection_time
end

-- estimate history
local pfc_stat_history = redis.call('HGET', counters_table_name .. ':' .. KEYS[i], 'PFC_STAT_HISTORY')
if pfc_stat_history and pfc_stat_history == "enable" then
local port_key = counters_table_name .. ':' .. port_id
local was_paused = parse_boolean(queue_pause_status_last)
local now_paused = parse_boolean(queue_pause_status)

-- Activity has occurred
if pfc_rx_packets > pfc_rx_packets_last then
-- fresh recent pause period
if not was_paused then
restartRecentTime(port_key, queue_index, timestamp_last)
end
-- Estimate entire interval paused if there was pfc activity
updateTimePaused(port_key, queue_index, time_since_last_poll)
else
-- queue paused entire interval without activity
if now_paused and was_paused then
updateTimePaused(port_key, queue_index, time_since_last_poll)
end
end
end
end

-- Save values for next run
Expand Down
19 changes: 15 additions & 4 deletions orchagent/pfcwdorch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
#define PFC_WD_ACTION "action"
#define PFC_WD_DETECTION_TIME "detection_time"
#define PFC_WD_RESTORATION_TIME "restoration_time"
#define PFC_STAT_HISTORY "pfc_stat_history"
#define BIG_RED_SWITCH_FIELD "BIG_RED_SWITCH"
#define PFC_WD_IN_STORM "storm"

Expand Down Expand Up @@ -187,6 +188,7 @@ task_process_status PfcWdOrch<DropHandler, ForwardHandler>::createEntry(const st
uint32_t restorationTime = 0;
// According to requirements, drop action is default
PfcWdAction action = PfcWdAction::PFC_WD_ACTION_DROP;
string pfcStatHistory = "disable";
Port port;
if (!gPortsOrch->getPort(key, port))
{
Expand Down Expand Up @@ -263,6 +265,9 @@ task_process_status PfcWdOrch<DropHandler, ForwardHandler>::createEntry(const st
}
}
}
else if(field == PFC_STAT_HISTORY){
pfcStatHistory = value;
}
else
{
SWSS_LOG_ERROR(
Expand Down Expand Up @@ -297,8 +302,13 @@ task_process_status PfcWdOrch<DropHandler, ForwardHandler>::createEntry(const st
SWSS_LOG_ERROR("%s missing", PFC_WD_DETECTION_TIME);
return task_process_status::task_invalid_entry;
}
if (pfcStatHistory != "enable" && pfcStatHistory != "disable")
{
SWSS_LOG_ERROR("%s is invalid value for %s", pfcStatHistory.c_str(), PFC_STAT_HISTORY);
return task_process_status::task_invalid_entry;
}

if (!startWdOnPort(port, detectionTime, restorationTime, action))
if (!startWdOnPort(port, detectionTime, restorationTime, action, pfcStatHistory))
{
SWSS_LOG_ERROR("Failed to start PFC Watchdog on port %s", port.m_alias.c_str());
return task_process_status::task_need_retry;
Expand Down Expand Up @@ -516,7 +526,7 @@ void PfcWdSwOrch<DropHandler, ForwardHandler>::enableBigRedSwitchMode()

template <typename DropHandler, typename ForwardHandler>
bool PfcWdSwOrch<DropHandler, ForwardHandler>::registerInWdDb(const Port& port,
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action)
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory)
{
SWSS_LOG_ENTER();

Expand Down Expand Up @@ -564,6 +574,7 @@ bool PfcWdSwOrch<DropHandler, ForwardHandler>::registerInWdDb(const Port& port,
"" :
to_string(restorationTime * 1000));
countersFieldValues.emplace_back("PFC_WD_ACTION", this->serializeAction(action));
countersFieldValues.emplace_back("PFC_STAT_HISTORY", pfcStatHistory);

this->getCountersTable()->set(queueIdStr, countersFieldValues);

Expand Down Expand Up @@ -747,11 +758,11 @@ PfcWdSwOrch<DropHandler, ForwardHandler>::PfcWdQueueEntry::PfcWdQueueEntry(

template <typename DropHandler, typename ForwardHandler>
bool PfcWdSwOrch<DropHandler, ForwardHandler>::startWdOnPort(const Port& port,
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action)
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory)
{
SWSS_LOG_ENTER();

return registerInWdDb(port, detectionTime, restorationTime, action);
return registerInWdDb(port, detectionTime, restorationTime, action, pfcStatHistory);
}

template <typename DropHandler, typename ForwardHandler>
Expand Down
6 changes: 3 additions & 3 deletions orchagent/pfcwdorch.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ class PfcWdOrch: public Orch

virtual void doTask(Consumer& consumer);
virtual bool startWdOnPort(const Port& port,
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action) = 0;
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory) = 0;
virtual bool stopWdOnPort(const Port& port) = 0;

shared_ptr<Table> getCountersTable(void)
Expand Down Expand Up @@ -89,7 +89,7 @@ class PfcWdSwOrch: public PfcWdOrch<DropHandler, ForwardHandler>

void doTask(Consumer& consumer) override;
virtual bool startWdOnPort(const Port& port,
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action);
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory);
virtual bool stopWdOnPort(const Port& port);

task_process_status createEntry(const string& key, const vector<FieldValueTuple>& data) override;
Expand Down Expand Up @@ -121,7 +121,7 @@ class PfcWdSwOrch: public PfcWdOrch<DropHandler, ForwardHandler>
template <typename T>
static unordered_set<string> counterIdsToStr(const vector<T> ids, string (*convert)(T));
bool registerInWdDb(const Port& port,
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action);
uint32_t detectionTime, uint32_t restorationTime, PfcWdAction action, string pfcStatHistory);
void unregisterFromWdDb(const Port& port);
void doTask(swss::NotificationConsumer &wdNotification);

Expand Down
120 changes: 120 additions & 0 deletions tests/mock_tests/portsorch_ut.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3429,6 +3429,126 @@ namespace portsorch_test
sai_port_api = orig_port_api;
}

/* This test verifies that an invalid configuration
 * of pfc stat history will not enable the feature:
 * the watchdog stays registered on the port and COUNTERS_DB
 * keeps PFC_STAT_HISTORY at its "disable" default.
 */
TEST_F(PortsOrchTest, PfcInvalidHistoryToggle)
{
    _hook_sai_switch_api();
    // setup the tables with data
    std::deque<KeyOpFieldsValuesTuple> entries;
    Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);

    // Get SAI default ports to populate DB
    auto ports = ut_helper::getInitialSaiPorts();

    // Populate port table with SAI ports
    for (const auto &it : ports)
    {
        portTable.set(it.first, it.second);
    }
    portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
    portTable.set("PortInitDone", { { "lanes", "0" } });

    // refill consumer
    gPortsOrch->addExistingData(&portTable);

    // Apply configuration :
    // create ports
    static_cast<Orch *>(gPortsOrch)->doTask();
    // Apply configuration
    // ports
    static_cast<Orch *>(gPortsOrch)->doTask();

    ASSERT_TRUE(gPortsOrch->allPortsReady());

    // No more tasks
    vector<string> ts;
    gPortsOrch->dumpPendingTasks(ts);
    ASSERT_TRUE(ts.empty());
    ts.clear();

    // Enable PFC (and the software watchdog) on priorities 3,4 of Ethernet0
    entries.clear();
    entries.push_back({ "Ethernet0", "SET", { { "pfc_enable", "3,4" }, { "pfcwd_sw_enable", "3,4" } } });
    auto portQosMapConsumer = dynamic_cast<Consumer *>(gQosOrch->getExecutor(CFG_PORT_QOS_MAP_TABLE_NAME));
    portQosMapConsumer->addToSync(entries);
    entries.clear();
    static_cast<Orch *>(gQosOrch)->doTask();

    // Start the PFC watchdog on Ethernet0 with a valid base configuration
    // (no pfc_stat_history field -> defaults to "disable")
    entries.push_back({ "GLOBAL", "SET", {{ "POLL_INTERVAL", "200" }}});
    entries.push_back({ "Ethernet0", "SET", {
        { "action", "drop" },
        { "detection_time", "200" },
        { "restoration_time", "200" }
    } });
    auto PfcwdConsumer = dynamic_cast<Consumer *>(gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>->getExecutor(CFG_PFC_WD_TABLE_NAME));
    PfcwdConsumer->addToSync(entries);

    // trigger the notification; the port should now be under watchdog control
    static_cast<Orch*>(gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>)->doTask();
    ASSERT_EQ((gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>->m_pfcwd_ports.size()), 1);
    entries.clear();

    // create pfcwd entry with an invalid history setting ("up" is not
    // "enable"/"disable"); the entry must be rejected without disturbing
    // the already-running watchdog
    entries.push_back({ "Ethernet0", "SET", {
        { "action", "drop" },
        { "detection_time", "200" },
        { "restoration_time", "200" },
        { "pfc_stat_history", "up" }
    } });
    PfcwdConsumer->addToSync(entries);
    static_cast<Orch*>(gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>)->doTask();
    ASSERT_EQ((gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>->m_pfcwd_ports.size()), 1);
    entries.clear();

    // verify in counters db that history is NOT enabled on either
    // watchdog-enabled queue (priorities 3 and 4)
    Port port;
    gPortsOrch->getPort("Ethernet0", port);
    auto countersTable = make_shared<Table>(m_counters_db.get(), COUNTERS_TABLE);
    auto entryMap = gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>->m_entryMap;

    sai_object_id_t queueId = port.m_queue_ids[3];
    ASSERT_NE(entryMap.find(queueId), entryMap.end());

    string queueIdStr = sai_serialize_object_id(queueId);
    vector<FieldValueTuple> countersFieldValues;
    countersTable->get(queueIdStr, countersFieldValues);
    ASSERT_NE(countersFieldValues.size(), 0);

    for (auto &valueTuple : countersFieldValues)
    {
        if (fvField(valueTuple) == "PFC_STAT_HISTORY")
        {
            ASSERT_TRUE(fvValue(valueTuple) == "disable");
        }
    }

    // repeat the check for the priority-4 queue
    queueId = port.m_queue_ids[4];
    ASSERT_NE(entryMap.find(queueId), entryMap.end());

    queueIdStr = sai_serialize_object_id(queueId);
    countersFieldValues.clear();
    countersTable->get(queueIdStr, countersFieldValues);
    ASSERT_NE(countersFieldValues.size(), 0);

    for (auto &valueTuple : countersFieldValues)
    {
        if (fvField(valueTuple) == "PFC_STAT_HISTORY")
        {
            ASSERT_TRUE(fvValue(valueTuple) == "disable");
        }
    }

    // remove from monitoring
    entries.push_back({ "Ethernet0", "DEL", { {} } });
    PfcwdConsumer->addToSync(entries);
    entries.clear();
    static_cast<Orch *>(gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>)->doTask();
    ASSERT_EQ((gPfcwdOrch<PfcWdDlrHandler, PfcWdDlrHandler>->m_pfcwd_ports.size()), 0);

    _unhook_sai_switch_api();
}

/*
* The scope of this test is to verify that LAG member is
* added to a LAG before any other object on LAG is created, like RIF, bridge port in warm mode.
Expand Down
Loading