|
-- KEYS - port name
-- ARGV[1] - profile name
-- ARGV[2] - new size
-- ARGV[3] - new xon
-- ARGV[4] - new xoff
-- ARGV[5] - pg to add

|
-- Input arguments: the port being checked, the candidate profile's name,
-- its size/xon/xoff thresholds, and the priority group about to be added.
local port = KEYS[1]
local input_profile_name = ARGV[1]
local input_profile_size = tonumber(ARGV[2])
local input_profile_xon = tonumber(ARGV[3])
local input_profile_xoff = tonumber(ARGV[4])
local new_pg = ARGV[5]

|
11 | 15 | local function is_port_with_8lanes(lanes) |
12 | 16 | -- On Spectrum 3, ports with 8 lanes have doubled pipeline latency |
|
55 | 59 |
|
-- Read per-ASIC constants from the ASIC_TABLE entry: pipeline latency,
-- cell size, and the per-port reserved/maximum shared-headroom-pool quotas.
local asic_keys = redis.call('KEYS', 'ASIC_TABLE*')
local pipeline_latency = tonumber(redis.call('HGET', asic_keys[1], 'pipeline_latency'))
local cell_size = tonumber(redis.call('HGET', asic_keys[1], 'cell_size'))
local port_reserved_shp = tonumber(redis.call('HGET', asic_keys[1], 'port_reserved_shp'))
local port_max_shp = tonumber(redis.call('HGET', asic_keys[1], 'port_max_shp'))
if is_port_with_8lanes(lanes) then
    -- The pipeline latency should be adjusted accordingly for ports with
    -- 2 buffer units; the same doubling applies to the egress mirror
    -- overhead and the per-port reserved shared headroom.
    pipeline_latency = pipeline_latency * 2 - 1
    egress_mirror_size = egress_mirror_size * 2
    port_reserved_shp = port_reserved_shp * 2
end

-- Account for the fixed lossy-PG overhead (pipeline latency in KiB) plus
-- the egress mirror overhead before walking the lossless PGs.
local lossy_pg_size = pipeline_latency * 1024
accumulative_size = accumulative_size + lossy_pg_size + egress_mirror_size
65 | 74 |
|
-- Fetch all keys in BUFFER_PG according to the port
redis.call('SELECT', appl_db)

-- The shared headroom pool (SHP) is considered enabled when the ingress
-- lossless pool carries a non-zero 'xoff' size.
local is_shp_enabled
local shp_size = tonumber(redis.call('HGET', 'BUFFER_POOL_TABLE:ingress_lossless_pool', 'xoff'))
if shp_size == nil or shp_size == 0 then
    is_shp_enabled = false
else
    is_shp_enabled = true
end
-- Running total of headroom that this port borrows from the shared pool.
local accumulative_shared_headroom = 0

local debuginfo = {}
|
71 | 89 | local function get_number_of_pgs(keyname) |
@@ -122,26 +140,50 @@ end |
table.insert(debuginfo, 'debug:other overhead:' .. accumulative_size)
-- Walk every PG on the port, summing each profile's reserved size and —
-- when SHP is enabled — the portion of (xon + xoff) that exceeds the
-- reserved size, which is what the PG borrows from the shared pool.
for pg_key, profile in pairs(all_pgs) do
    local current_profile_size
    local current_profile_xon
    local current_profile_xoff
    local buffer_profile_table_name = 'BUFFER_PROFILE_TABLE:'
    if profile ~= input_profile_name then
        local referenced_profile_size = redis.call('HGET', buffer_profile_table_name .. profile, 'size')
        if not referenced_profile_size then
            -- Not yet applied: fall back to the pending table.
            buffer_profile_table_name = '_BUFFER_PROFILE_TABLE:'
            referenced_profile_size = redis.call('HGET', buffer_profile_table_name .. profile, 'size')
            table.insert(debuginfo, 'debug:pending profile: ' .. profile)
        end
        current_profile_size = tonumber(referenced_profile_size)
        current_profile_xon = tonumber(redis.call('HGET', buffer_profile_table_name .. profile, 'xon'))
        current_profile_xoff = tonumber(redis.call('HGET', buffer_profile_table_name .. profile, 'xoff'))
    else
        -- This PG references the profile under test: use the candidate values.
        current_profile_size = input_profile_size
        current_profile_xon = input_profile_xon
        current_profile_xoff = input_profile_xoff
    end
    if current_profile_size == 0 then
        -- A zero-sized profile means a lossy PG; charge the fixed overhead.
        current_profile_size = lossy_pg_size
    end
    accumulative_size = accumulative_size + current_profile_size * get_number_of_pgs(pg_key)

    if is_shp_enabled and current_profile_xon and current_profile_xoff then
        if current_profile_size < current_profile_xon + current_profile_xoff then
            accumulative_shared_headroom = accumulative_shared_headroom + (current_profile_xon + current_profile_xoff - current_profile_size) * get_number_of_pgs(pg_key)
        end
    end
    table.insert(debuginfo, 'debug:' .. pg_key .. ':' .. profile .. ':' .. current_profile_size .. ':' .. get_number_of_pgs(pg_key) .. ':accu:' .. accumulative_size .. ':accu_shp:' .. accumulative_shared_headroom)
end
141 | 173 |
|
-- Final verdict: the per-port private headroom must fit, and — when SHP is
-- enabled — the accumulated shared headroom must fit the port's SHP budget
-- ((port_max_shp + port_reserved_shp) scaled by cell_size).
-- NOTE(review): the closing 'end' of this 'if' lies beyond this chunk.
if max_headroom_size > accumulative_size then
    if is_shp_enabled then
        local max_shp = (port_max_shp + port_reserved_shp) * cell_size
        if accumulative_shared_headroom > max_shp then
            table.insert(ret, "result:false")
        else
            table.insert(ret, "result:true")
        end
        table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. ", the maximum available headroom " .. max_headroom_size .. ", the port SHP " .. accumulative_shared_headroom .. ", max SHP " .. max_shp)
    else
        table.insert(ret, "result:true")
        table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. ", the maximum available headroom " .. max_headroom_size)
    end
else
    table.insert(ret, "result:false")
    table.insert(ret, "debug:Accumulative headroom on port " .. accumulative_size .. " exceeds the maximum available headroom which is " .. max_headroom_size)
|
0 commit comments