@@ -773,31 +773,6 @@ MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
     return ZSTD_countTrailingZeros64(val);
 }
 
-/* ZSTD_rotateRight_*():
- * Rotates a bitfield to the right by "count" bits.
- * https://en.wikipedia.org/w/index.php?title=Circular_shift&oldid=991635599#Implementing_circular_shifts
- */
-FORCE_INLINE_TEMPLATE
-U64 ZSTD_rotateRight_U64(U64 const value, U32 count) {
-    assert(count < 64);
-    count &= 0x3F; /* for fickle pattern recognition */
-    return (value >> count) | (U64)(value << ((0U - count) & 0x3F));
-}
-
-FORCE_INLINE_TEMPLATE
-U32 ZSTD_rotateRight_U32(U32 const value, U32 count) {
-    assert(count < 32);
-    count &= 0x1F; /* for fickle pattern recognition */
-    return (value >> count) | (U32)(value << ((0U - count) & 0x1F));
-}
-
-FORCE_INLINE_TEMPLATE
-U16 ZSTD_rotateRight_U16(U16 const value, U32 count) {
-    assert(count < 16);
-    count &= 0x0F; /* for fickle pattern recognition */
-    return (value >> count) | (U16)(value << ((0U - count) & 0x0F));
-}
-
 /* ZSTD_row_nextIndex():
  * Returns the next index to insert at within a tagTable row, and updates the "head"
  * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
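The helpers removed above implement the standard well-defined rotate idiom. As a standalone illustration (hypothetical name, not part of zstd), the reason the left-shift amount is written as ((0U - count) & 0x3F) rather than (64 - count) is that the latter shifts a 64-bit value by 64 when count == 0, which is undefined behavior in C; the masked form is also the exact shape compilers pattern-match into a single rotate instruction:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative stand-in for the removed ZSTD_rotateRight_U64.
     * ((0U - count) & 0x3F) shifts by 0 when count == 0, avoiding the
     * undefined 64-bit shift, and compilers recognize this exact shape
     * and lower it to one rotate instruction (e.g. x86 ror). */
    static uint64_t rotate_right_u64(uint64_t value, unsigned count) {
        assert(count < 64);
        count &= 0x3F;
        return (value >> count) | (value << ((0U - count) & 0x3F));
    }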
@@ -850,7 +825,7 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
     U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);
 
     for (; idx < lim; ++idx) {
-        U32 const hash = (U32)ZSTD_hashPtr(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+        U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
         U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
         ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
         ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
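This hunk swaps ZSTD_hashPtr for ZSTD_hashPtrSalted but does not show the hash itself. As a rough sketch of the general technique only (hypothetical code, not zstd's actual implementation, which lives elsewhere in the tree), a 64-bit salt can be mixed into the loaded bytes before a multiplicative hash, so entries written under a different salt are unlikely to look like valid candidates:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Hypothetical sketch of a salted multiplicative hash over 8 bytes.
     * XORing the salt into the input before the multiply perturbs every
     * bucket choice, effectively giving each use of the table its own
     * hash function. */
    static uint32_t hash_ptr_salted_sketch(const void* p, unsigned hBits, uint64_t salt) {
        uint64_t v;
        assert(hBits > 0 && hBits <= 32);
        memcpy(&v, p, sizeof(v));   /* unaligned-safe 8-byte read */
        v ^= salt;
        return (uint32_t)((v * 0x9E3779B185EBCA87ULL) >> (64 - hBits));
    }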
@@ -868,9 +843,10 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const B
 FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
                                                   BYTE const* tagTable, BYTE const* base,
                                                   U32 idx, U32 const hashLog,
-                                                  U32 const rowLog, U32 const mls)
+                                                  U32 const rowLog, U32 const mls,
+                                                  U64 const hashSalt)
 {
-    U32 const newHash = (U32)ZSTD_hashPtr(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+    U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
     U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
     ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
     {   U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
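For context on the function being threaded through here: the hash cache is a small power-of-two ring buffer indexed by idx & ZSTD_ROW_HASH_CACHE_MASK. Each call returns the previously computed hash for idx and refills that slot with the hash for idx + ZSTD_ROW_HASH_CACHE_SIZE, keeping prefetches a fixed distance ahead of use. A minimal sketch of the same rolling pattern (hypothetical names and a stand-in hash):

    #include <stdint.h>

    #define CACHE_SIZE 8u                  /* must be a power of two */
    #define CACHE_MASK (CACHE_SIZE - 1)

    /* Stand-in hash for the sketch (Knuth multiplicative hash). */
    static uint32_t hash_at(uint32_t idx) { return idx * 2654435761u; }

    /* Return the cached hash for idx and refill its slot with the hash
     * for idx + CACHE_SIZE, mirroring ZSTD_row_nextCachedHash's shape:
     * work for position idx + CACHE_SIZE is started (and could be
     * prefetched) CACHE_SIZE iterations before it is consumed. */
    static uint32_t next_cached_hash(uint32_t* cache, uint32_t idx) {
        uint32_t const ahead = hash_at(idx + CACHE_SIZE);
        uint32_t const hash  = cache[idx & CACHE_MASK];
        cache[idx & CACHE_MASK] = ahead;
        return hash;
    }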
@@ -890,21 +866,24 @@ FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
     U32* const hashTable = ms->hashTable;
     BYTE* const tagTable = ms->tagTable;
     U32 const hashLog = ms->rowHashLog;
+    U32 hashSaltEntropyCollected = 0;
     const BYTE* const base = ms->window.base;
 
     DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
     for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
-        U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls)
-                                  : (U32)ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
+        U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
+                                  : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
         U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
         U32* const row = hashTable + relRow;
         BYTE* tagRow = tagTable + relRow;
         U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
 
-        assert(hash == ZSTD_hashPtr(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls));
+        assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
         tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
         row[pos] = updateStartIdx;
+        hashSaltEntropyCollected = hash;
     }
+    ms->hashSaltEntropy += hashSaltEntropyCollected; /* collect salt entropy */
 }
 
 /* ZSTD_row_update_internal():
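Worth noting: hashSaltEntropyCollected is overwritten, not accumulated, inside the loop, and folded into ms->hashSaltEntropy exactly once afterwards. Presumably the intent is to gather one data-dependent sample per call without introducing a loop-carried dependency on the accumulator in this hot loop. A sketch of the same pattern, with hypothetical names and a stand-in hash:

    #include <stdint.h>

    /* The loop's real work is the table insert; the hash is merely
     * recorded (overwritten each iteration, so iterations stay
     * independent) and folded into the running entropy once at the end. */
    static void insert_range_sketch(uint32_t* table, uint32_t mask, uint64_t* entropy,
                                    uint32_t start, uint32_t end, uint64_t salt) {
        uint32_t lastHash = 0;
        for (; start < end; ++start) {
            uint32_t const hash = (uint32_t)((((uint64_t)start ^ salt) * 0x9E3779B185EBCA87ULL) >> 40);
            table[hash & mask] = start;   /* real work */
            lastHash = hash;              /* overwrite, don't accumulate */
        }
        *entropy += lastHash;             /* one sample per call */
    }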
@@ -1162,6 +1141,7 @@ size_t ZSTD_RowFindBestMatch(
     const U32 rowMask = rowEntries - 1;
     const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
     const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
+    const U64 hashSalt = ms->hashSalt;
     U32 nbAttempts = 1U << cappedSearchLog;
     size_t ml = 4-1;
@@ -1199,7 +1179,7 @@ size_t ZSTD_RowFindBestMatch(
     /* Update the hashTable and tagTable up to (but not including) ip */
     ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
     {   /* Get the hash for ip, compute the appropriate row */
-        U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls);
+        U32 const hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
         U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
         U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
         U32* const row = hashTable + relRow;