Skip to content

Commit ffea522

Browse files
[portsorch] fix crash when number of PGs returned 0 (sonic-net#3966)
What I did Fix issue reported in sonic-net#3961. Regression happened after move to bulk implementation. The error handling of no PGs, queues was incorrect. Bulk array preparation and reading should use a consistent approach. For example, if a port is skipped in the input array, it should also be skipped when reading from the output array. Why I did it Fix issue reported in sonic-net#3961.
1 parent ea54ff8 commit ffea522

3 files changed

Lines changed: 115 additions & 13 deletions

File tree

orchagent/portsorch.cpp

Lines changed: 24 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -6464,10 +6464,16 @@ void PortsOrch::initializePriorityGroupsBulk(std::vector<Port>& ports)
64646464

64656465
bulker.executeGet();
64666466

6467-
for (size_t idx = 0; idx < portCount; idx++)
6467+
size_t idx = 0;
6468+
for (const auto& port: ports)
64686469
{
6469-
const auto& port = ports[idx];
6470+
if (port.m_priority_group_ids.size() == 0)
6471+
{
6472+
continue;
6473+
}
6474+
64706475
const auto status = bulker.statuses[idx];
6476+
idx++;
64716477

64726478
if (status != SAI_STATUS_SUCCESS)
64736479
{
@@ -6542,10 +6548,16 @@ void PortsOrch::initializeQueuesBulk(std::vector<Port>& ports)
65426548

65436549
bulker.executeGet();
65446550

6545-
for (size_t idx = 0; idx < portCount; idx++)
6551+
size_t idx = 0;
6552+
for (const auto& port: ports)
65466553
{
6547-
const auto& port = ports[idx];
6554+
if (port.m_queue_ids.size() == 0)
6555+
{
6556+
continue;
6557+
}
6558+
65486559
const auto status = bulker.statuses[idx];
6560+
idx++;
65496561

65506562
if (status != SAI_STATUS_SUCCESS)
65516563
{
@@ -6622,10 +6634,17 @@ void PortsOrch::initializeSchedulerGroupsBulk(std::vector<Port>& ports)
66226634

66236635
bulker.executeGet();
66246636

6637+
size_t bulkIdx = 0;
66256638
for (size_t idx = 0; idx < portCount; idx++)
66266639
{
66276640
const auto& port = ports[idx];
6628-
const auto status = bulker.statuses[idx];
6641+
if (scheduler_group_ids[idx].size() == 0)
6642+
{
6643+
continue;
6644+
}
6645+
6646+
const auto status = bulker.statuses[bulkIdx];
6647+
bulkIdx++;
66296648

66306649
if (status != SAI_STATUS_SUCCESS)
66316650
{

tests/mock_tests/portsorch_ut.cpp

Lines changed: 81 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2925,14 +2925,42 @@ namespace portsorch_test
29252925
}
29262926
}
29272927

2928-
TEST_F(PortsOrchTest, PortHostIfCreateFailed)
2928+
TEST_F(PortsOrchTest, PortsWithNoPGsQueuesSchedulerGroups)
29292929
{
29302930
Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
29312931

2932-
auto original_api = sai_hostif_api->create_hostif;
2933-
auto hostIfSpy = SpyOn<SAI_API_HOSTIF, SAI_OBJECT_TYPE_HOSTIF>(&sai_hostif_api->create_hostif);
2934-
hostIfSpy->callFake([&](sai_object_id_t*, sai_object_id_t, uint32_t, const sai_attribute_t*) -> sai_status_t {
2935-
return SAI_STATUS_INSUFFICIENT_RESOURCES;
2932+
auto original_api = sai_port_api->get_ports_attribute;
2933+
// Mock SAI port API to return 0 for the number of PGs, queues and scheduler groups
2934+
auto spy = SpyOn<SAI_API_PORT, SAI_OBJECT_TYPE_PORT>(&sai_port_api->get_ports_attribute);
2935+
spy->callFake([&](
2936+
uint32_t object_count,
2937+
const sai_object_id_t *object_id,
2938+
const uint32_t *attr_count,
2939+
sai_attribute_t **attr_list,
2940+
sai_bulk_op_error_mode_t mode,
2941+
sai_status_t *object_statuses) -> sai_status_t
2942+
{
2943+
assert(object_count > 1);
2944+
assert(attr_count[0] > 1);
2945+
switch (attr_list[0]->id)
2946+
{
2947+
case SAI_PORT_ATTR_NUMBER_OF_INGRESS_PRIORITY_GROUPS:
2948+
case SAI_PORT_ATTR_QOS_NUMBER_OF_QUEUES:
2949+
case SAI_PORT_ATTR_QOS_NUMBER_OF_SCHEDULER_GROUPS:
2950+
for (size_t i = 0; i < object_count; i++)
2951+
{
2952+
attr_list[i]->value.u32 = 0;
2953+
object_statuses[i] = SAI_STATUS_SUCCESS;
2954+
}
2955+
return SAI_STATUS_SUCCESS;
2956+
}
2957+
return original_api(
2958+
object_count,
2959+
object_id,
2960+
attr_count,
2961+
attr_list,
2962+
mode,
2963+
object_statuses);
29362964
}
29372965
);
29382966

@@ -2956,14 +2984,15 @@ namespace portsorch_test
29562984

29572985
static_cast<Orch *>(gPortsOrch)->doTask();
29582986

2959-
sai_hostif_api->create_hostif = original_api;
2960-
29612987
Port port;
29622988
gPortsOrch->getPort("Ethernet0", port);
29632989

2964-
ASSERT_FALSE(port.m_init);
2990+
ASSERT_TRUE(port.m_init);
2991+
ASSERT_EQ(port.m_priority_group_ids.size(), 0);
2992+
ASSERT_EQ(port.m_queue_ids.size(), 0);
29652993
}
29662994

2995+
29672996
TEST_F(PortsOrchTest, PfcDlrHandlerCallingDlrInitAttribute)
29682997
{
29692998
_hook_sai_port_api();
@@ -3908,4 +3937,48 @@ namespace portsorch_test
39083937
stateDbSet = stateTable.hget("Ethernet0", "max_priority_groups", value);
39093938
ASSERT_TRUE(stateDbSet);
39103939
}
3940+
3941+
struct PortsOrchNegativeTests : PortsOrchTest
3942+
{
3943+
};
3944+
3945+
TEST_F(PortsOrchNegativeTests, PortHostIfCreateFailed)
3946+
{
3947+
Table portTable = Table(m_app_db.get(), APP_PORT_TABLE_NAME);
3948+
3949+
auto original_api = sai_hostif_api->create_hostif;
3950+
auto hostIfSpy = SpyOn<SAI_API_HOSTIF, SAI_OBJECT_TYPE_HOSTIF>(&sai_hostif_api->create_hostif);
3951+
hostIfSpy->callFake([&](sai_object_id_t*, sai_object_id_t, uint32_t, const sai_attribute_t*) -> sai_status_t {
3952+
return SAI_STATUS_INSUFFICIENT_RESOURCES;
3953+
}
3954+
);
3955+
3956+
// Get SAI default ports to populate DB
3957+
3958+
auto ports = ut_helper::getInitialSaiPorts();
3959+
3960+
// Populate port table with SAI ports
3961+
for (const auto &it : ports)
3962+
{
3963+
portTable.set(it.first, it.second);
3964+
}
3965+
3966+
// Set PortConfigDone
3967+
portTable.set("PortConfigDone", { { "count", to_string(ports.size()) } });
3968+
3969+
gPortsOrch->addExistingData(&portTable);
3970+
3971+
// Apply configuration :
3972+
// create ports
3973+
3974+
static_cast<Orch *>(gPortsOrch)->doTask();
3975+
3976+
sai_hostif_api->create_hostif = original_api;
3977+
3978+
Port port;
3979+
gPortsOrch->getPort("Ethernet0", port);
3980+
3981+
ASSERT_FALSE(port.m_init);
3982+
}
3983+
39113984
}

tests/mock_tests/saispy.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -118,3 +118,13 @@ std::shared_ptr<SaiSpyFunctor<n, objtype, sai_status_t, sai_object_id_t, uint32_
118118

119119
return std::make_shared<SaiSpyGetAttrFunctor>(fn_ptr);
120120
}
121+
122+
// get bulk entry attribute
123+
template <int n, int objtype>
124+
std::shared_ptr<SaiSpyFunctor<n, objtype, sai_status_t, uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*>>
125+
SpyOn(sai_status_t (**fn_ptr)(uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*))
126+
{
127+
using SaiSpyGetAttrFunctor = SaiSpyFunctor<n, objtype, sai_status_t, uint32_t, const sai_object_id_t*, const uint32_t*, sai_attribute_t**, sai_bulk_op_error_mode_t, sai_status_t*>;
128+
129+
return std::make_shared<SaiSpyGetAttrFunctor>(fn_ptr);
130+
}

0 commit comments

Comments
 (0)