@@ -1955,6 +1955,17 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
         ZSTD_cwksp_clean_tables(ws);
     }
 
+    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
+        /* Row match finder needs an additional table of hashes ("tags") */
+        size_t const tagTableSize = hSize;
+        ms->tagTable = (BYTE*) ZSTD_cwksp_reserve_aligned_init_once(ws, tagTableSize);
+        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
+            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
+            assert(cParams->hashLog >= rowLog);
+            ms->rowHashLog = cParams->hashLog - rowLog;
+        }
+    }
+
     /* opt parser space */
     if ((forWho == ZSTD_resetTarget_CCtx) && (cParams->strategy >= ZSTD_btopt)) {
         DEBUGLOG(4, "reserving optimal parser space");
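The added block reserves the tag table (one hash "tag" byte per hash-table entry, so tagTableSize equals hSize) and derives rowHashLog from the compression parameters. A minimal standalone sketch of that sizing arithmetic, using example parameter values and a hypothetical bounded() helper standing in for zstd's BOUNDED macro:

```c
#include <assert.h>
#include <stdio.h>

/* Hypothetical clamp helper mirroring zstd's BOUNDED(min, val, max). */
static unsigned bounded(unsigned lo, unsigned val, unsigned hi)
{
    return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
    unsigned const hashLog   = 17;  /* example cParams.hashLog */
    unsigned const searchLog = 5;   /* example cParams.searchLog */

    /* searchLog of 5 or more selects 32-entry rows; rowLog is clamped
     * to [4, 6], i.e. rows of 16 to 64 entries. */
    unsigned const rowLog       = bounded(4, searchLog, 6);
    unsigned const rowHashLog   = hashLog - rowLog;   /* number of rows = 1 << rowHashLog */
    size_t   const hSize        = (size_t)1 << hashLog;
    size_t   const tagTableSize = hSize;               /* one tag byte per hash entry */

    assert(hashLog >= rowLog);
    printf("rows=%zu entries/row=%u tagTable=%zu bytes\n",
           (size_t)1 << rowHashLog, 1u << rowLog, tagTableSize);
    return 0;
}
```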
@@ -1966,19 +1977,6 @@ ZSTD_reset_matchState(ZSTD_matchState_t* ms,
         ms->opt.priceTable = (ZSTD_optimal_t*)ZSTD_cwksp_reserve_aligned(ws, (ZSTD_OPT_NUM+1) * sizeof(ZSTD_optimal_t));
     }
 
-    if (ZSTD_rowMatchFinderUsed(cParams->strategy, useRowMatchFinder)) {
-        {   /* Row match finder needs an additional table of hashes ("tags") */
-            size_t const tagTableSize = hSize;
-            ms->tagTable = (BYTE*)ZSTD_cwksp_reserve_aligned(ws, tagTableSize);
-            if (ms->tagTable) ZSTD_memset(ms->tagTable, 0, tagTableSize);
-        }
-        {   /* Switch to 32-entry rows if searchLog is 5 (or more) */
-            U32 const rowLog = BOUNDED(4, cParams->searchLog, 6);
-            assert(cParams->hashLog >= rowLog);
-            ms->rowHashLog = cParams->hashLog - rowLog;
-        }
-    }
-
     ms->cParams = *cParams;
 
     RETURN_ERROR_IF(ZSTD_cwksp_reserve_failed(ws), memory_allocation,
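The tag table thus moves from ZSTD_cwksp_reserve_aligned plus an explicit ZSTD_memset on every reset to ZSTD_cwksp_reserve_aligned_init_once. Reading the API name (the diff itself does not spell this out), the workspace now zero-fills the region only the first time it is handed out and preserves it across resets. A toy arena illustrating that "init once" reservation pattern, not the actual ZSTD_cwksp implementation:

```c
#include <stdio.h>
#include <string.h>

/* Toy arena: bytes below initOnceEnd were zeroed by an earlier reservation
 * and survive arena_reset(), so only the never-touched tail needs a memset. */
typedef struct {
    char   base[1 << 16];
    size_t used;          /* rolled back to 0 by arena_reset() */
    size_t initOnceEnd;   /* high-water mark of zero-initialized bytes */
} Arena;

static void* arena_reserve_init_once(Arena* a, size_t size)
{
    char* const p = a->base + a->used;
    if (a->used + size > sizeof(a->base)) return NULL;
    a->used += size;
    if (a->used > a->initOnceEnd) {
        /* zero only the portion that has never been initialized */
        memset(a->base + a->initOnceEnd, 0, a->used - a->initOnceEnd);
        a->initOnceEnd = a->used;
    }
    return p;
}

static void arena_reset(Arena* a) { a->used = 0; }  /* contents are preserved */

int main(void)
{
    static Arena a;  /* zero-initialized static keeps the example short */
    char* tags = arena_reserve_init_once(&a, 4096);  /* zeroed on first reservation */
    tags[42] = 1;
    arena_reset(&a);
    tags = arena_reserve_init_once(&a, 4096);        /* same bytes, no re-zeroing */
    printf("tag[42] after reset: %d\n", tags[42]);   /* prints 1: contents survived */
    return 0;
}
```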
@@ -2125,13 +2123,46 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
 
         ZSTD_reset_compressedBlockState(zc->blockState.prevCBlock);
 
+        FORWARD_IF_ERROR(ZSTD_reset_matchState(
+            &zc->blockState.matchState,
+            ws,
+            &params->cParams,
+            params->useRowMatchFinder,
+            crp,
+            needsIndexReset,
+            ZSTD_resetTarget_CCtx), "");
+
+        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
+
+        /* ldm hash table */
+        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
+            /* TODO: avoid memset? */
+            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
+            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
+            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
+            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
+            zc->maxNbLdmSequences = maxNbLdmSeq;
+
+            ZSTD_window_init(&zc->ldmState.window);
+            zc->ldmState.loadedDictEnd = 0;
+        }
+
+        /* reserve space for block-level external sequences */
+        if (params->useSequenceProducer) {
+            size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
+            zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq;
+            zc->externalMatchCtx.seqBuffer =
+                (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
+        }
+
+        /* buffers */
+
         /* ZSTD_wildcopy() is used to copy into the literals buffer,
          * so we have to oversize the buffer by WILDCOPY_OVERLENGTH bytes.
          */
         zc->seqStore.litStart = ZSTD_cwksp_reserve_buffer(ws, blockSize + WILDCOPY_OVERLENGTH);
         zc->seqStore.maxNbLit = blockSize;
 
-        /* buffers */
         zc->bufferedPolicy = zbuff;
         zc->inBuffSize = buffInSize;
         zc->inBuff = (char*)ZSTD_cwksp_reserve_buffer(ws, buffInSize);
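Within the relocated block, the LDM hash table is sized as (1 << ldmParams.hashLog) entries of ldmEntry_t and zeroed up front (the "TODO: avoid memset?" comment hints at the cost). A quick back-of-the-envelope sketch of that cost across hash log values; the 8-byte entry size is an assumption for illustration, since ldmEntry_t's layout is not shown in this diff:

```c
#include <stdio.h>

int main(void)
{
    /* Hypothetical per-entry size; the real ldmEntry_t is not defined here. */
    size_t const assumedEntrySize = 8;

    for (unsigned hashLog = 20; hashLog <= 27; hashLog++) {
        size_t const ldmHSize = (size_t)1 << hashLog;   /* entry count, as in the diff */
        printf("ldm hashLog=%2u -> %zu entries, ~%zu MiB zeroed per reset\n",
               hashLog, ldmHSize, (ldmHSize * assumedEntrySize) >> 20);
    }
    return 0;
}
```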
@@ -2154,40 +2185,9 @@ static size_t ZSTD_resetCCtx_internal(ZSTD_CCtx* zc,
         zc->seqStore.llCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
         zc->seqStore.mlCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
         zc->seqStore.ofCode = ZSTD_cwksp_reserve_buffer(ws, maxNbSeq * sizeof(BYTE));
-        zc->seqStore.sequencesStart = (seqDef*)ZSTD_cwksp_reserve_aligned(ws, maxNbSeq * sizeof(seqDef));
-
-        FORWARD_IF_ERROR(ZSTD_reset_matchState(
-            &zc->blockState.matchState,
-            ws,
-            &params->cParams,
-            params->useRowMatchFinder,
-            crp,
-            needsIndexReset,
-            ZSTD_resetTarget_CCtx), "");
-
-        /* ldm hash table */
-        if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
-            /* TODO: avoid memset? */
-            size_t const ldmHSize = ((size_t)1) << params->ldmParams.hashLog;
-            zc->ldmState.hashTable = (ldmEntry_t*)ZSTD_cwksp_reserve_aligned(ws, ldmHSize * sizeof(ldmEntry_t));
-            ZSTD_memset(zc->ldmState.hashTable, 0, ldmHSize * sizeof(ldmEntry_t));
-            zc->ldmSequences = (rawSeq*)ZSTD_cwksp_reserve_aligned(ws, maxNbLdmSeq * sizeof(rawSeq));
-            zc->maxNbLdmSequences = maxNbLdmSeq;
-
-            ZSTD_window_init(&zc->ldmState.window);
-            zc->ldmState.loadedDictEnd = 0;
-        }
-
-        /* reserve space for block-level external sequences */
-        if (params->useSequenceProducer) {
-            size_t const maxNbExternalSeq = ZSTD_sequenceBound(blockSize);
-            zc->externalMatchCtx.seqBufferCapacity = maxNbExternalSeq;
-            zc->externalMatchCtx.seqBuffer =
-                (ZSTD_Sequence*)ZSTD_cwksp_reserve_aligned(ws, maxNbExternalSeq * sizeof(ZSTD_Sequence));
-        }
-
 
         DEBUGLOG(3, "wksp: finished allocating, %zd bytes remain available", ZSTD_cwksp_available_space(ws));
-        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace, resizeWorkspace));
+        assert(ZSTD_cwksp_estimated_space_within_bounds(ws, neededSpace));
 
         zc->initialized = 1;
 