@@ -184,6 +184,19 @@ struct mlxsw_sp_acl_tcam_vgroup {
 struct mlxsw_sp_acl_tcam_rehash_ctx {
 	void *hints_priv;
 	bool this_is_rollback;
+	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
+							  * currently migrated.
+							  */
+	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
+							* migration from in
+							* a vchunk being
+							* currently migrated.
+							*/
+	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
+						       * migration at in
+						       * a vchunk being
+						       * currently migrated.
+						       */
 };
 
 struct mlxsw_sp_acl_tcam_vregion {
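
Taken together, the three new fields form a migration cursor: current_vchunk records which vchunk the rehash work was processing when it ran out of credits, while start_ventry and stop_ventry bound the remaining work inside that vchunk. The following userspace sketch is a rough model of the idea, not mlxsw code; all names are hypothetical, and the chunk and stop fields only come into play in the later sketches:

```c
#include <stdio.h>

struct entry {
	int id;
	struct entry *next;
};

struct chunk;	/* defined in a later sketch */

/* Hypothetical cursor modeling the three fields added above:
 * chunk ~ current_vchunk, start ~ start_ventry, stop ~ stop_ventry.
 */
struct cursor {
	struct chunk *chunk;	/* list being migrated, NULL = start over */
	struct entry *start;	/* entry to resume from, NULL = list head */
	struct entry *stop;	/* entry to stop at during rollback */
};

/* Process entries until the credit budget runs out; returns 1 if
 * interrupted (resume point saved in the cursor), 0 once the whole
 * list has been walked.
 */
static int walk_some(struct entry *head, struct cursor *cur, int *credits)
{
	struct entry *e = cur->start ? cur->start : head;

	for (; e; e = e->next) {
		if ((*credits)-- <= 0) {
			cur->start = e;	/* remember where we stopped */
			return 1;
		}
		printf("migrating entry %d\n", e->id);
	}
	cur->start = NULL;	/* done, next walk starts from the head */
	return 0;
}

int main(void)
{
	struct entry c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct cursor cur = { NULL, NULL, NULL };

	for (;;) {
		int credits = 2;	/* per-invocation budget */

		if (!walk_some(&a, &cur, &credits))
			break;
		printf("out of credits, rescheduling\n");
	}
	return 0;
}
```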
@@ -755,6 +768,31 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
 	mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+{
+	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
+
+	/* If a rule was added to or deleted from a vchunk which is
+	 * currently under rehash migration, we have to reset the ventry
+	 * pointers to make sure all rules are properly migrated.
+	 */
+	if (vregion->rehash.ctx.current_vchunk == vchunk) {
+		vregion->rehash.ctx.start_ventry = NULL;
+		vregion->rehash.ctx.stop_ventry = NULL;
+	}
+}
+
+static void
+mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
+{
+	/* If a vchunk was added to or deleted from the vregion, we have
+	 * to reset the current vchunk pointer to make sure all vchunks
+	 * are properly migrated.
+	 */
+	vregion->rehash.ctx.current_vchunk = NULL;
+}
+
 static struct mlxsw_sp_acl_tcam_vregion *
 mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
 				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
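
These helpers encode the cursor-invalidation rule: a saved pointer into a list is only meaningful while the list is unchanged, so any insertion or removal clears the relevant part of the cursor and forces the next migration pass to rescan. Otherwise start_ventry could be left dangling at a freed rule, or a rule added behind the cursor could be missed. Extending the toy model above (simplified: the real vchunk helper only resets the pointers when the mutated vchunk is the one under migration):

```c
/* Extends the sketch above: list mutations must invalidate the cursor.
 * In the real driver both helpers run with vregion->lock held, in the
 * same critical section as the list_add_tail()/list_del().
 */
static void entry_list_changed(struct cursor *cur)
{
	/* Mirrors ..._rehash_ctx_vchunk_changed(): a resumed walk could
	 * otherwise chase a freed entry or skip a newly added one.
	 */
	cur->start = NULL;
	cur->stop = NULL;
}

static void chunk_list_changed(struct cursor *cur)
{
	/* Mirrors ..._rehash_ctx_vregion_changed(). */
	cur->chunk = NULL;
}

/* Example removal path: invalidate the cursor, then unlink. */
static void delete_entry(struct entry **pnext, struct entry *victim,
			 struct cursor *cur)
{
	entry_list_changed(cur);
	while (*pnext && *pnext != victim)
		pnext = &(*pnext)->next;
	if (*pnext)
		*pnext = victim->next;
}
```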
@@ -989,6 +1027,7 @@ mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
 		goto err_chunk_create;
 	}
 
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
 	list_add_tail(&vchunk->list, &vregion->vchunk_list);
 	mutex_unlock(&vregion->lock);
 
@@ -1012,6 +1051,7 @@ mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;
 
 	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
 	list_del(&vchunk->list);
 	if (vchunk->chunk2)
 		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
@@ -1141,6 +1181,7 @@ static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
 	}
 
 	list_add_tail(&ventry->list, &vchunk->ventry_list);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
 	mutex_unlock(&vregion->lock);
 
 	return 0;
@@ -1157,6 +1198,7 @@ static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
 
 	mutex_lock(&vregion->lock);
+	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
 	list_del(&ventry->list);
 	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
 	mutex_unlock(&vregion->lock);
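
The four one-line hunks above are the call sites: each mutation path invalidates the cursor inside the same vregion->lock critical section that mutates the list, so the rehash work, which takes the same mutex, never observes a list and a cursor that disagree. A minimal sketch of that ordering, with a pthread mutex standing in for vregion->lock (hypothetical names as before):

```c
#include <pthread.h>

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;

/* Add path, mirroring the ventry_add() hunk: take the lock, link the
 * new entry, invalidate the cursor, drop the lock. Because a migration
 * pass also runs under region_lock, it either sees the old list with
 * the old cursor or the new list with a reset cursor, never a mix.
 */
static void add_entry(struct entry **tail_next, struct entry *e,
		      struct cursor *cur)
{
	pthread_mutex_lock(&region_lock);
	e->next = NULL;
	*tail_next = e;			/* list_add_tail() equivalent */
	entry_list_changed(cur);	/* reset before anyone can resume */
	pthread_mutex_unlock(&region_lock);
}
```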
@@ -1223,15 +1265,20 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
 	}
 	vchunk->chunk2 = vchunk->chunk;
 	vchunk->chunk = new_chunk;
+	ctx->current_vchunk = vchunk;
+	ctx->start_ventry = NULL;
+	ctx->stop_ventry = NULL;
 	return 0;
 }
 
 static void
 mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
-				     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
+				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
+				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
 {
 	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
 	vchunk->chunk2 = NULL;
+	ctx->current_vchunk = NULL;
 }
 
 static int
@@ -1254,7 +1301,22 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 		return 0;
 	}
 
-	list_for_each_entry(ventry, &vchunk->ventry_list, list) {
+	/* If the migration got interrupted, we have the ventry to start from
+	 * stored in context.
+	 */
+	if (ctx->start_ventry)
+		ventry = ctx->start_ventry;
+	else
+		ventry = list_first_entry(&vchunk->ventry_list,
+					  typeof(*ventry), list);
+
+	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
+		/* During rollback, once we reach the ventry that failed
+		 * to migrate, we are done.
+		 */
+		if (ventry == ctx->stop_ventry)
+			break;
+
 		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
 						       vchunk->chunk, credits);
 		if (err) {
@@ -1265,16 +1327,25 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 			 * in vchunk->chunk.
 			 */
 			swap(vchunk->chunk, vchunk->chunk2);
+			/* The rollback has to be done from the beginning
+			 * of the chunk, that is why we have to null the
+			 * start_ventry. However, we know where to stop
+			 * the rollback: at the current ventry.
+			 */
+			ctx->start_ventry = NULL;
+			ctx->stop_ventry = ventry;
 			return err;
 		} else if (*credits < 0) {
 			/* We are out of credits, the rest of the ventries
-			 * will be migrated later.
+			 * will be migrated later. Save the ventry
+			 * we ended with.
 			 */
+			ctx->start_ventry = ventry;
 			return 0;
 		}
 	}
 
-	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk);
+	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
 	return 0;
 }
 
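These two hunks give mlxsw_sp_acl_tcam_vchunk_migrate_one() its three exits: the whole ventry list migrated (fall through to ..._migrate_end()), a migration failure (swap the chunks back, null start_ventry so the rollback re-walks from the head, and mark stop_ventry so it halts at the entry that never moved), or credit exhaustion (save the current ventry and return 0 so the work item can reschedule). The same bookkeeping in the toy model; like the real code resuming at start_ventry, it assumes re-processing an already-migrated entry is a harmless no-op:

```c
/* Stub standing in for mlxsw_sp_acl_tcam_ventry_migrate(); assumed
 * idempotent for already-migrated entries. The failure condition is
 * arbitrary, purely for illustration.
 */
static int migrate_one_entry(struct entry *e)
{
	return e->id >= 0 ? 0 : -1;
}

/* Toy version of the resume/rollback bookkeeping above. Returns 0 on
 * success or when interrupted by the credit budget, -1 when an entry
 * fails and a rollback pass is needed.
 */
static int migrate_entries(struct entry *head, struct cursor *cur,
			   int *credits)
{
	struct entry *e = cur->start ? cur->start : head;

	for (; e; e = e->next) {
		/* During rollback, the walk ends at the entry that
		 * failed to migrate; everything after it never moved.
		 */
		if (e == cur->stop)
			break;
		if (migrate_one_entry(e) < 0) {
			/* Roll back from the head of the list, but only
			 * up to (not including) this entry.
			 */
			cur->start = NULL;
			cur->stop = e;
			return -1;
		}
		if (--(*credits) < 0) {
			cur->start = e;	/* resume at this entry later */
			return 0;
		}
	}
	cur->start = NULL;	/* this list is fully processed */
	return 0;
}
```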
@@ -1287,7 +1358,16 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
 	int err;
 
-	list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
+	/* If the migration got interrupted, we have the vchunk
+	 * we are working on stored in context.
+	 */
+	if (ctx->current_vchunk)
+		vchunk = ctx->current_vchunk;
+	else
+		vchunk = list_first_entry(&vregion->vchunk_list,
+					  typeof(*vchunk), list);
+
+	list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
 		err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
 							   vregion->region,
 							   ctx, credits);
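
mlxsw_sp_acl_tcam_vchunk_migrate_all() applies the same resume pattern one level up: pick up at current_vchunk if the previous pass was interrupted, otherwise at the head of the vchunk list, and let list_for_each_entry_from() walk the remainder. The outer level of the toy model drives the inner one the same way:

```c
/* Outer level of the toy model: chunks hold entry lists, and the walk
 * resumes at the chunk saved in the cursor, like current_vchunk above.
 */
struct chunk {
	struct entry *entries;
	struct chunk *next;
};

static int migrate_all(struct chunk *chunks, struct cursor *cur,
		       int *credits)
{
	struct chunk *c = cur->chunk ? cur->chunk : chunks;
	int err;

	for (; c; c = c->next) {
		cur->chunk = c;
		err = migrate_entries(c->entries, cur, credits);
		if (err)
			return err;	/* caller will run the rollback */
		if (*credits < 0)
			return 0;	/* interrupted; resume at cur->chunk */
	}
	cur->chunk = NULL;	/* every chunk fully migrated */
	return 0;
}
```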
@@ -1315,6 +1395,7 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
 		 * to vregion->region.
 		 */
 		swap(vregion->region, vregion->region2);
+		ctx->current_vchunk = NULL;
 		ctx->this_is_rollback = true;
 		err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
 							    ctx, credits);
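
Finally, the rollback path: after the region swap, current_vchunk is cleared because the rollback must revisit every vchunk from the first one (any of them may already hold migrated entries), and this_is_rollback makes the repeated ..._vchunk_migrate_all() pass move them back, bounded per vchunk by the stop_ventry markers set on the failure path. The toy counterpart:

```c
/* Toy counterpart of the failure handling in
 * mlxsw_sp_acl_tcam_vregion_migrate(): rerun the same walk to undo the
 * partial migration. In the real driver the earlier swap() calls are
 * what make the second pass move entries back to the original region.
 */
static int do_migrate(struct chunk *chunks, struct cursor *cur, int *credits)
{
	int err = migrate_all(chunks, cur, credits);

	if (err) {
		/* Rollback must rescan every chunk from the start; the
		 * per-chunk stop marker limits how far each walk goes.
		 */
		cur->chunk = NULL;
		migrate_all(chunks, cur, credits);	/* rollback pass */
	}
	return err;
}
```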