Skip to content

Commit cc06b18

Browse files
authored
[QoS] Optimize QoS operations of buffer setting and queue information fetching (#2752)
What I did Optimize QoS operations: Cache queue information to avoid fetching them from SAI every time The cache is created when a queue's information is fetched for the first time Avoid calling SAI API to fetch queue information if it exists in the cache Cache will be cleared for the queues of a certain port when the port is removed Apply buffer items (table: BUFFER_QUEUE, BUFFER_PG, BUFFER_PORT_INGRESS_PROFILE_LIST, BUFFER_PORT_EGRESS_PROFILE_LIST) only if they are updated There is only one attribute, profile or profile_list, in the items in all the tables, and the attribute is stored in BufferOrch::m_buffer_type_maps, which means we can just check whether the new value is the same as the one stored in the mapping and apply to SAI only if it differs. For the BUFFER_QUEUE table, it's possible that it needs to retry when a PFC storm is detected on the queue. A new set m_partiallyAppliedQueues is introduced to handle this case. In any case, if it fails to call SAI API, we do not repeat calling it when the buffer table is set with the same value of attribute because it's users' responsibility to correct the configuration. Signed-off-by: Stephen Sun [email protected] Why I did it Theoretically, it should be fast for both operations. But there is a mutex in sairedis enforcing a critical section for all SAI APIs. In case there is another SAI API ongoing, eg. fetching the counter, it has to wait for the current one to finish which can take more milliseconds. This occurs frequently when a large number of buffer PG or queue items are being set and the accumulated time is significant. In this scenario, two threads run parallelly and they will compete the critical section. Syncd main thread in which the buffer PG, queue setting API, or queue info getting API runs, FlexCounter thread in which the counter is fetched. How I verified it Mock test Regression test Details if related An example of queue information fetching. 
For each queue, the information is fetched 5 times, which consumes ~0.25 seconds. With the caching logic, it will be fetched only once. 2023-04-20.18:01:00.634562|a|INIT_VIEW 2023-04-20.18:01:00.635586|A|SAI_STATUS_SUCCESS -- 2023-04-20.18:01:43.290205|g|SAI_OBJECT_TYPE_QUEUE:oid:0x15000000000549|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_ALL|SAI_QUEUE_ATTR_INDEX=205 2023-04-20.18:01:43.331625|G|SAI_STATUS_SUCCESS|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_UNICAST|SAI_QUEUE_ATTR_INDEX=4 -- 2023-04-20.18:01:46.420931|g|SAI_OBJECT_TYPE_QUEUE:oid:0x15000000000549|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_ALL|SAI_QUEUE_ATTR_INDEX=0 2023-04-20.18:01:46.422113|G|SAI_STATUS_SUCCESS|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_UNICAST|SAI_QUEUE_ATTR_INDEX=4 -- 2023-04-20.18:01:56.825879|g|SAI_OBJECT_TYPE_QUEUE:oid:0x15000000000549|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_ALL|SAI_QUEUE_ATTR_INDEX=24 2023-04-20.18:01:56.866720|G|SAI_STATUS_SUCCESS|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_UNICAST|SAI_QUEUE_ATTR_INDEX=4 -- 2023-04-20.18:02:37.248679|a|APPLY_VIEW 2023-04-20.18:02:37.249435|A|SAI_STATUS_SUCCESS -- 2023-04-20.18:02:54.824194|g|SAI_OBJECT_TYPE_QUEUE:oid:0x15000000000549|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_ALL|SAI_QUEUE_ATTR_INDEX=205 2023-04-20.18:02:54.866955|G|SAI_STATUS_SUCCESS|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_UNICAST|SAI_QUEUE_ATTR_INDEX=4 -- 2023-04-20.18:02:54.932174|g|SAI_OBJECT_TYPE_QUEUE:oid:0x15000000000549|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_ALL|SAI_QUEUE_ATTR_INDEX=205 2023-04-20.18:02:54.965082|G|SAI_STATUS_SUCCESS|SAI_QUEUE_ATTR_TYPE=SAI_QUEUE_TYPE_UNICAST|SAI_QUEUE_ATTR_INDEX=4
1 parent fe8c395 commit cc06b18

6 files changed

Lines changed: 247 additions & 18 deletions

File tree

orchagent/bufferorch.cpp

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -839,6 +839,21 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
839839
return task_process_status::task_failed;
840840
}
841841

842+
string old_buffer_profile_name;
843+
if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name)
844+
&& (old_buffer_profile_name == buffer_profile_name))
845+
{
846+
if (m_partiallyAppliedQueues.find(key) == m_partiallyAppliedQueues.end())
847+
{
848+
SWSS_LOG_INFO("Skip setting buffer queue %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str());
849+
return task_process_status::task_success;
850+
}
851+
else
852+
{
853+
m_partiallyAppliedQueues.erase(key);
854+
}
855+
}
856+
842857
SWSS_LOG_NOTICE("Set buffer queue %s to %s", key.c_str(), buffer_profile_name.c_str());
843858

844859
setObjectReference(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name);
@@ -854,6 +869,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
854869
sai_buffer_profile = SAI_NULL_OBJECT_ID;
855870
SWSS_LOG_NOTICE("Remove buffer queue %s", key.c_str());
856871
removeObject(m_buffer_type_maps, APP_BUFFER_QUEUE_TABLE_NAME, key);
872+
m_partiallyAppliedQueues.erase(key);
857873
}
858874
else
859875
{
@@ -898,6 +914,7 @@ task_process_status BufferOrch::processQueue(KeyOpFieldsValuesTuple &tuple)
898914
if (port.m_queue_lock[ind])
899915
{
900916
SWSS_LOG_WARN("Queue %zd on port %s is locked, will retry", ind, port_name.c_str());
917+
m_partiallyAppliedQueues.insert(key);
901918
return task_process_status::task_need_retry;
902919
}
903920
queue_id = port.m_queue_ids[ind];
@@ -1038,6 +1055,14 @@ task_process_status BufferOrch::processPriorityGroup(KeyOpFieldsValuesTuple &tup
10381055
return task_process_status::task_failed;
10391056
}
10401057

1058+
string old_buffer_profile_name;
1059+
if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, old_buffer_profile_name)
1060+
&& (old_buffer_profile_name == buffer_profile_name))
1061+
{
1062+
SWSS_LOG_INFO("Skip setting buffer priority group %s to %s since it is not changed", key.c_str(), buffer_profile_name.c_str());
1063+
return task_process_status::task_success;
1064+
}
1065+
10411066
SWSS_LOG_NOTICE("Set buffer PG %s to %s", key.c_str(), buffer_profile_name.c_str());
10421067

10431068
setObjectReference(m_buffer_type_maps, APP_BUFFER_PG_TABLE_NAME, key, buffer_profile_field_name, buffer_profile_name);
@@ -1209,6 +1234,14 @@ task_process_status BufferOrch::processIngressBufferProfileList(KeyOpFieldsValue
12091234
return task_process_status::task_failed;
12101235
}
12111236

1237+
string old_profile_name_list;
1238+
if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list)
1239+
&& (old_profile_name_list == profile_name_list))
1240+
{
1241+
SWSS_LOG_INFO("Skip setting buffer ingress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str());
1242+
return task_process_status::task_success;
1243+
}
1244+
12121245
setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list);
12131246

12141247
attr.value.objlist.count = (uint32_t)profile_list.size();
@@ -1280,6 +1313,14 @@ task_process_status BufferOrch::processEgressBufferProfileList(KeyOpFieldsValues
12801313
return task_process_status::task_failed;
12811314
}
12821315

1316+
string old_profile_name_list;
1317+
if (doesObjectExist(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, old_profile_name_list)
1318+
&& (old_profile_name_list == profile_name_list))
1319+
{
1320+
SWSS_LOG_INFO("Skip setting buffer egress profile list %s to %s since it is not changed", key.c_str(), profile_name_list.c_str());
1321+
return task_process_status::task_success;
1322+
}
1323+
12831324
setObjectReference(m_buffer_type_maps, APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, key, buffer_profile_list_field_name, profile_name_list);
12841325

12851326
attr.value.objlist.count = (uint32_t)profile_list.size();

orchagent/bufferorch.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ class BufferOrch : public Orch
7272
unique_ptr<DBConnector> m_countersDb;
7373

7474
bool m_isBufferPoolWatermarkCounterIdListGenerated = false;
75-
75+
set<string> m_partiallyAppliedQueues;
7676
};
7777
#endif /* SWSS_BUFFORCH_H */
7878

orchagent/portsorch.cpp

Lines changed: 33 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2448,19 +2448,36 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin
24482448
{
24492449
SWSS_LOG_ENTER();
24502450

2451-
sai_attribute_t attr[2];
2452-
attr[0].id = SAI_QUEUE_ATTR_TYPE;
2453-
attr[1].id = SAI_QUEUE_ATTR_INDEX;
2451+
auto const &queueInfoRef = m_queueInfo.find(queue_id);
24542452

2455-
sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr);
2456-
if (status != SAI_STATUS_SUCCESS)
2453+
sai_attribute_t attr[2];
2454+
if (queueInfoRef == m_queueInfo.end())
24572455
{
2458-
SWSS_LOG_ERROR("Failed to get queue type and index for queue %" PRIu64 " rv:%d", queue_id, status);
2459-
task_process_status handle_status = handleSaiGetStatus(SAI_API_QUEUE, status);
2460-
if (handle_status != task_process_status::task_success)
2456+
attr[0].id = SAI_QUEUE_ATTR_TYPE;
2457+
attr[1].id = SAI_QUEUE_ATTR_INDEX;
2458+
2459+
sai_status_t status = sai_queue_api->get_queue_attribute(queue_id, 2, attr);
2460+
if (status != SAI_STATUS_SUCCESS)
24612461
{
2462-
return false;
2462+
SWSS_LOG_ERROR("Failed to get queue type and index for queue %" PRIu64 " rv:%d", queue_id, status);
2463+
task_process_status handle_status = handleSaiGetStatus(SAI_API_QUEUE, status);
2464+
if (handle_status != task_process_status::task_success)
2465+
{
2466+
return false;
2467+
}
24632468
}
2469+
2470+
SWSS_LOG_INFO("Caching information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id);
2471+
2472+
m_queueInfo[queue_id].type = static_cast<sai_queue_type_t>(attr[0].value.s32);
2473+
m_queueInfo[queue_id].index = attr[1].value.u8;
2474+
}
2475+
else
2476+
{
2477+
attr[0].value.s32 = m_queueInfo[queue_id].type;
2478+
attr[1].value.u8 = m_queueInfo[queue_id].index;
2479+
2480+
SWSS_LOG_INFO("Fetched cached information (index %d type %d) for queue %" PRIx64, attr[1].value.u8, attr[0].value.s32, queue_id);
24642481
}
24652482

24662483
switch (attr[0].value.s32)
@@ -2478,7 +2495,7 @@ bool PortsOrch::getQueueTypeAndIndex(sai_object_id_t queue_id, string &type, uin
24782495
type = "SAI_QUEUE_TYPE_UNICAST_VOQ";
24792496
break;
24802497
default:
2481-
SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIu64 " queue", attr[0].value.s32, queue_id);
2498+
SWSS_LOG_ERROR("Got unsupported queue type %d for %" PRIx64 " queue", attr[0].value.s32, queue_id);
24822499
throw runtime_error("Got unsupported queue type");
24832500
}
24842501

@@ -2755,6 +2772,12 @@ sai_status_t PortsOrch::removePort(sai_object_id_t port_id)
27552772

27562773
removePortSerdesAttribute(port_id);
27572774

2775+
for (auto queue_id : port.m_queue_ids)
2776+
{
2777+
SWSS_LOG_INFO("Removing cached information for queue %" PRIx64, queue_id);
2778+
m_queueInfo.erase(queue_id);
2779+
}
2780+
27582781
sai_status_t status = sai_port_api->remove_port(port_id);
27592782
if (status != SAI_STATUS_SUCCESS)
27602783
{

orchagent/portsorch.h

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,14 @@ struct VlanMemberUpdate
9797
bool add;
9898
};
9999

100+
struct queueInfo
101+
{
102+
// SAI_QUEUE_ATTR_TYPE
103+
sai_queue_type_t type;
104+
// SAI_QUEUE_ATTR_INDEX
105+
sai_uint8_t index;
106+
};
107+
100108
class PortsOrch : public Orch, public Subject
101109
{
102110
public:
@@ -465,5 +473,6 @@ class PortsOrch : public Orch, public Subject
465473
set<sai_object_id_t> m_macsecEnabledPorts;
466474

467475
std::unordered_set<std::string> generateCounterStats(const string& type, bool gearbox = false);
476+
map<sai_object_id_t, struct queueInfo> m_queueInfo;
468477
};
469478
#endif /* SWSS_PORTSORCH_H */

tests/mock_tests/bufferorch_ut.cpp

Lines changed: 112 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,10 @@ namespace bufferorch_test
1919

2020
sai_port_api_t ut_sai_port_api;
2121
sai_port_api_t *pold_sai_port_api;
22+
sai_buffer_api_t ut_sai_buffer_api;
23+
sai_buffer_api_t *pold_sai_buffer_api;
24+
sai_queue_api_t ut_sai_queue_api;
25+
sai_queue_api_t *pold_sai_queue_api;
2226

2327
shared_ptr<swss::DBConnector> m_app_db;
2428
shared_ptr<swss::DBConnector> m_app_state_db;
@@ -51,17 +55,47 @@ namespace bufferorch_test
5155
return pold_sai_port_api->set_port_attribute(port_id, attr);
5256
}
5357

54-
void _hook_sai_port_api()
58+
uint32_t _ut_stub_set_pg_count;
59+
sai_status_t _ut_stub_sai_set_ingress_priority_group_attribute(
60+
_In_ sai_object_id_t ingress_priority_group_id,
61+
_In_ const sai_attribute_t *attr)
62+
{
63+
_ut_stub_set_pg_count++;
64+
return pold_sai_buffer_api->set_ingress_priority_group_attribute(ingress_priority_group_id, attr);
65+
}
66+
67+
uint32_t _ut_stub_set_queue_count;
68+
sai_status_t _ut_stub_sai_set_queue_attribute(
69+
_In_ sai_object_id_t queue_id,
70+
_In_ const sai_attribute_t *attr)
71+
{
72+
_ut_stub_set_queue_count++;
73+
return pold_sai_queue_api->set_queue_attribute(queue_id, attr);
74+
}
75+
76+
void _hook_sai_apis()
5577
{
5678
ut_sai_port_api = *sai_port_api;
5779
pold_sai_port_api = sai_port_api;
5880
ut_sai_port_api.set_port_attribute = _ut_stub_sai_set_port_attribute;
5981
sai_port_api = &ut_sai_port_api;
82+
83+
ut_sai_buffer_api = *sai_buffer_api;
84+
pold_sai_buffer_api = sai_buffer_api;
85+
ut_sai_buffer_api.set_ingress_priority_group_attribute = _ut_stub_sai_set_ingress_priority_group_attribute;
86+
sai_buffer_api = &ut_sai_buffer_api;
87+
88+
ut_sai_queue_api = *sai_queue_api;
89+
pold_sai_queue_api = sai_queue_api;
90+
ut_sai_queue_api.set_queue_attribute = _ut_stub_sai_set_queue_attribute;
91+
sai_queue_api = &ut_sai_queue_api;
6092
}
6193

62-
void _unhook_sai_port_api()
94+
void _unhook_sai_apis()
6395
{
6496
sai_port_api = pold_sai_port_api;
97+
sai_buffer_api = pold_sai_buffer_api;
98+
sai_queue_api = pold_sai_queue_api;
6599
}
66100

67101
struct BufferOrchTest : public ::testing::Test
@@ -341,6 +375,7 @@ namespace bufferorch_test
341375

342376
TEST_F(BufferOrchTest, BufferOrchTestBufferPgReferencingObjRemoveThenAdd)
343377
{
378+
_hook_sai_apis();
344379
vector<string> ts;
345380
std::deque<KeyOpFieldsValuesTuple> entries;
346381
Table bufferPgTable = Table(m_app_db.get(), APP_BUFFER_PG_TABLE_NAME);
@@ -398,18 +433,34 @@ namespace bufferorch_test
398433
bufferProfileConsumer->addToSync(entries);
399434
entries.clear();
400435
// Drain BUFFER_PROFILE_TABLE table
436+
auto sai_pg_attr_set_count = _ut_stub_set_pg_count;
401437
static_cast<Orch *>(gBufferOrch)->doTask();
402438
// Make sure the dependency recovers
403439
CheckDependency(APP_BUFFER_PG_TABLE_NAME, "Ethernet0:0", "profile", APP_BUFFER_PROFILE_TABLE_NAME, "ingress_lossy_profile");
440+
ASSERT_EQ(++sai_pg_attr_set_count, _ut_stub_set_pg_count);
404441

405442
// All items have been drained
406443
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
407444
ASSERT_TRUE(ts.empty());
445+
446+
// Try applying the same profile, which should not call SAI API
447+
entries.push_back({"Ethernet0:0", "SET",
448+
{
449+
{"profile", "ingress_lossy_profile"}
450+
}});
451+
bufferPgConsumer->addToSync(entries);
452+
entries.clear();
453+
sai_pg_attr_set_count = _ut_stub_set_pg_count;
454+
static_cast<Orch *>(gBufferOrch)->doTask();
455+
ASSERT_EQ(sai_pg_attr_set_count, _ut_stub_set_pg_count);
456+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
457+
ASSERT_TRUE(ts.empty());
458+
_unhook_sai_apis();
408459
}
409460

410461
TEST_F(BufferOrchTest, BufferOrchTestReferencingObjRemoveThenAdd)
411462
{
412-
_hook_sai_port_api();
463+
_hook_sai_apis();
413464
vector<string> ts;
414465
std::deque<KeyOpFieldsValuesTuple> entries;
415466
Table bufferProfileListTable = Table(m_app_db.get(), APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME);
@@ -494,6 +545,29 @@ namespace bufferorch_test
494545
// As an side-effect, all pending notifications should be drained
495546
ASSERT_TRUE(ts.empty());
496547

548+
// Apply a buffer item only if it is changed
549+
_ut_stub_expected_profile_list_type = SAI_PORT_ATTR_QOS_INGRESS_BUFFER_PROFILE_LIST;
550+
_ut_stub_expected_profile_count = 1;
551+
entries.push_back({"Ethernet0", "SET",
552+
{
553+
{"profile_list", "ingress_lossy_profile"}
554+
}});
555+
consumer = dynamic_cast<Consumer *>(gBufferOrch->getExecutor(APP_BUFFER_PORT_INGRESS_PROFILE_LIST_NAME));
556+
consumer->addToSync(entries);
557+
sai_port_profile_list_create_count = _ut_stub_port_profile_list_add_count;
558+
// Drain BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE table
559+
static_cast<Orch *>(gBufferOrch)->doTask();
560+
ASSERT_EQ(++sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count);
561+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
562+
ASSERT_TRUE(ts.empty());
563+
564+
// Try applying it for the second time, which should not call SAI API
565+
consumer->addToSync(entries);
566+
static_cast<Orch *>(gBufferOrch)->doTask();
567+
ASSERT_EQ(sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count);
568+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
569+
ASSERT_TRUE(ts.empty());
570+
497571
// To satisfy the coverage requirement
498572
bufferProfileListTable.set("Ethernet0",
499573
{
@@ -505,12 +579,12 @@ namespace bufferorch_test
505579
ASSERT_EQ(ts[0], "BUFFER_PORT_INGRESS_PROFILE_LIST_TABLE:Ethernet0|SET|profile_list:ingress_no_exist_profile");
506580
ts.clear();
507581

508-
_unhook_sai_port_api();
582+
_unhook_sai_apis();
509583
}
510584

511585
TEST_F(BufferOrchTest, BufferOrchTestCreateAndRemoveEgressProfileList)
512586
{
513-
_hook_sai_port_api();
587+
_hook_sai_apis();
514588
vector<string> ts;
515589
std::deque<KeyOpFieldsValuesTuple> entries;
516590
Table bufferPoolTable = Table(m_app_db.get(), APP_BUFFER_POOL_TABLE_NAME);
@@ -553,9 +627,21 @@ namespace bufferorch_test
553627
CheckDependency(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME, "Ethernet0", "profile_list",
554628
APP_BUFFER_PROFILE_TABLE_NAME, "egress_lossless_profile");
555629

630+
// Try applying it for the second time, which should not call SAI API
631+
entries.push_back({"Ethernet0", "SET",
632+
{
633+
{"profile_list", "egress_lossless_profile"}
634+
}});
635+
auto consumer = dynamic_cast<Consumer *>(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME));
636+
consumer->addToSync(entries);
637+
entries.clear();
638+
static_cast<Orch *>(gBufferOrch)->doTask();
639+
ASSERT_EQ(sai_port_profile_list_create_count, _ut_stub_port_profile_list_add_count);
640+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
641+
ASSERT_TRUE(ts.empty());
642+
556643
// Remove egress port profile list
557644
entries.push_back({"Ethernet0", "DEL", {}});
558-
auto consumer = dynamic_cast<Consumer *>(gBufferOrch->getExecutor(APP_BUFFER_PORT_EGRESS_PROFILE_LIST_NAME));
559645
consumer->addToSync(entries);
560646
entries.clear();
561647
// Drain BUFFER_PORT_EGRESS_PROFILE_LIST_TABLE table
@@ -567,6 +653,25 @@ namespace bufferorch_test
567653
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
568654
ASSERT_TRUE(ts.empty());
569655

570-
_unhook_sai_port_api();
656+
// Queue table
657+
entries.push_back({"Ethernet0:0", "SET",
658+
{
659+
{"profile", "egress_lossless_profile"}
660+
}});
661+
consumer = dynamic_cast<Consumer *>(gBufferOrch->getExecutor(APP_BUFFER_QUEUE_TABLE_NAME));
662+
consumer->addToSync(entries);
663+
auto sai_queue_set_count = _ut_stub_set_queue_count;
664+
static_cast<Orch *>(gBufferOrch)->doTask();
665+
ASSERT_EQ(++sai_queue_set_count, _ut_stub_set_queue_count);
666+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
667+
ASSERT_TRUE(ts.empty());
668+
669+
consumer->addToSync(entries);
670+
static_cast<Orch *>(gBufferOrch)->doTask();
671+
ASSERT_EQ(sai_queue_set_count, _ut_stub_set_queue_count);
672+
static_cast<Orch *>(gBufferOrch)->dumpPendingTasks(ts);
673+
ASSERT_TRUE(ts.empty());
674+
675+
_unhook_sai_apis();
571676
}
572677
}

0 commit comments

Comments
 (0)