Skip to content

Commit 0c1d1d2

Browse files
committed
HBASE-25698 Fixing IllegalReferenceCountException when using TinyLfuBlockCache (#3215)
Signed-off-by: Andrew Purtell <[email protected]> Signed-off-by: Anoop Sam John <[email protected]> Signed-off-by: Michael Stack <[email protected]>
1 parent beaeb0d commit 0c1d1d2

File tree

2 files changed

+112
-8
lines changed

2 files changed

+112
-8
lines changed

hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/TinyLfuBlockCache.java

Lines changed: 40 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,13 @@ public boolean containsBlock(BlockCacheKey cacheKey) {
158158
@Override
159159
public Cacheable getBlock(BlockCacheKey cacheKey,
160160
boolean caching, boolean repeat, boolean updateCacheMetrics) {
161-
Cacheable value = cache.getIfPresent(cacheKey);
161+
Cacheable value = cache.asMap().computeIfPresent(cacheKey, (blockCacheKey, cacheable) -> {
162+
// It will be referenced by RPC path, so increase here. NOTICE: Must do the retain inside
163+
// this block. because if retain outside the map#computeIfPresent, the evictBlock may remove
164+
// the block and release, then we're retaining a block with refCnt=0 which is disallowed.
165+
cacheable.retain();
166+
return cacheable;
167+
});
162168
if (value == null) {
163169
if (repeat) {
164170
return null;
@@ -169,9 +175,6 @@ public Cacheable getBlock(BlockCacheKey cacheKey,
169175
if (victimCache != null) {
170176
value = victimCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
171177
if ((value != null) && caching) {
172-
if ((value instanceof HFileBlock) && ((HFileBlock) value).isSharedMem()) {
173-
value = HFileBlock.deepCloneOnHeap((HFileBlock) value);
174-
}
175178
cacheBlock(cacheKey, value);
176179
}
177180
}
@@ -192,17 +195,48 @@ public void cacheBlock(BlockCacheKey key, Cacheable value) {
192195
// If there are a lot of blocks that are too big this can make the logs too noisy (2% logged)
193196
if (stats.failInsert() % 50 == 0) {
194197
LOG.warn(String.format(
195-
"Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
196-
key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
198+
"Trying to cache too large a block %s @ %,d is %,d which is larger than %,d",
199+
key.getHfileName(), key.getOffset(), value.heapSize(), DEFAULT_MAX_BLOCK_SIZE));
197200
}
198201
} else {
202+
value = asReferencedHeapBlock(value);
199203
cache.put(key, value);
200204
}
201205
}
202206

207+
/**
208+
* The block cached in TinyLfuBlockCache will always be a heap block: on the one hand, the heap
209+
* access will be faster than off-heap, the small index block or meta block cached in
210+
* CombinedBlockCache will benefit a lot. On the other hand, the TinyLfuBlockCache size is always
211+
* calculated based on the total heap size, if caching an off-heap block in TinyLfuBlockCache, the
212+
* heap size will be messed up. Here we will clone the block into a heap block if it's an
213+
* off-heap block, otherwise just use the original block. The key point is to maintain the refCnt of
214+
* the block (HBASE-22127): <br>
215+
* 1. if cache the cloned heap block, its refCnt is a totally new one, it's easy to handle; <br>
216+
* 2. if cache the original heap block, we're sure that it won't be tracked in ByteBuffAllocator's
217+
* reservoir, if both RPC and TinyLfuBlockCache release the block, then it can be
218+
* garbage collected by JVM, so need a retain here.
219+
*
220+
* @param buf the original block
221+
* @return a block with a heap memory backend.
222+
*/
223+
private Cacheable asReferencedHeapBlock(Cacheable buf) {
224+
if (buf instanceof HFileBlock) {
225+
HFileBlock blk = ((HFileBlock) buf);
226+
if (blk.isSharedMem()) {
227+
return HFileBlock.deepCloneOnHeap(blk);
228+
}
229+
}
230+
// The block will be referenced by this TinyLfuBlockCache, so should increase its refCnt here.
231+
return buf.retain();
232+
}
233+
203234
@Override
204235
public boolean evictBlock(BlockCacheKey cacheKey) {
205236
Cacheable value = cache.asMap().remove(cacheKey);
237+
if (value != null) {
238+
value.release();
239+
}
206240
return (value != null);
207241
}
208242

hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestHFile.java

Lines changed: 72 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.BUFFER_SIZE_KEY;
2323
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MAX_BUFFER_COUNT_KEY;
2424
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.MIN_ALLOCATE_SIZE_KEY;
25+
import static org.apache.hadoop.hbase.io.hfile.BlockCacheFactory.BLOCKCACHE_POLICY_KEY;
2526
import static org.apache.hadoop.hbase.io.hfile.CacheConfig.EVICT_BLOCKS_ON_CLOSE_KEY;
2627
import static org.junit.Assert.assertEquals;
2728
import static org.junit.Assert.assertFalse;
@@ -204,10 +205,11 @@ public void testReaderWithLRUBlockCache() throws Exception {
204205
lru.shutdown();
205206
}
206207

207-
private BlockCache initCombinedBlockCache() {
208+
private BlockCache initCombinedBlockCache(final String l1CachePolicy) {
208209
Configuration that = HBaseConfiguration.create(conf);
209210
that.setFloat(BUCKET_CACHE_SIZE_KEY, 32); // 32MB for bucket cache.
210211
that.set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
212+
that.set(BLOCKCACHE_POLICY_KEY, l1CachePolicy);
211213
BlockCache bc = BlockCacheFactory.createBlockCache(that);
212214
Assert.assertNotNull(bc);
213215
Assert.assertTrue(bc instanceof CombinedBlockCache);
@@ -224,7 +226,7 @@ public void testReaderWithCombinedBlockCache() throws Exception {
224226
fillByteBuffAllocator(alloc, bufCount);
225227
Path storeFilePath = writeStoreFile();
226228
// Open the file reader with CombinedBlockCache
227-
BlockCache combined = initCombinedBlockCache();
229+
BlockCache combined = initCombinedBlockCache("LRU");
228230
conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
229231
CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
230232
HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
@@ -854,4 +856,72 @@ public void testDBEShipped() throws IOException {
854856
writer.close();
855857
}
856858
}
859+
860+
/**
861+
* Test case for CombinedBlockCache with TinyLfu as L1 cache
862+
*/
863+
@Test
864+
public void testReaderWithTinyLfuCombinedBlockCache() throws Exception {
865+
testReaderCombinedCache("TinyLfu");
866+
}
867+
868+
/**
869+
* Test case for CombinedBlockCache with AdaptiveLRU as L1 cache
870+
*/
871+
@Test
872+
public void testReaderWithAdaptiveLruCombinedBlockCache() throws Exception {
873+
testReaderCombinedCache("AdaptiveLRU");
874+
}
875+
876+
/**
877+
* Test case for CombinedBlockCache with LRU as L1 cache
878+
*/
879+
@Test
880+
public void testReaderWithLruCombinedBlockCache() throws Exception {
881+
testReaderCombinedCache("LRU");
882+
}
883+
884+
private void testReaderCombinedCache(final String l1CachePolicy) throws Exception {
885+
int bufCount = 1024;
886+
int blockSize = 64 * 1024;
887+
ByteBuffAllocator alloc = initAllocator(true, bufCount, blockSize, 0);
888+
fillByteBuffAllocator(alloc, bufCount);
889+
Path storeFilePath = writeStoreFile();
890+
// Open the file reader with CombinedBlockCache
891+
BlockCache combined = initCombinedBlockCache(l1CachePolicy);
892+
conf.setBoolean(EVICT_BLOCKS_ON_CLOSE_KEY, true);
893+
CacheConfig cacheConfig = new CacheConfig(conf, null, combined, alloc);
894+
HFile.Reader reader = HFile.createReader(fs, storeFilePath, cacheConfig, true, conf);
895+
long offset = 0;
896+
Cacheable cachedBlock = null;
897+
while (offset < reader.getTrailer().getLoadOnOpenDataOffset()) {
898+
BlockCacheKey key = new BlockCacheKey(storeFilePath.getName(), offset);
899+
HFileBlock block = reader.readBlock(offset, -1, true, true, false, true, null, null);
900+
offset += block.getOnDiskSizeWithHeader();
901+
// Read the cached block.
902+
cachedBlock = combined.getBlock(key, false, false, true);
903+
try {
904+
Assert.assertNotNull(cachedBlock);
905+
Assert.assertTrue(cachedBlock instanceof HFileBlock);
906+
HFileBlock hfb = (HFileBlock) cachedBlock;
907+
// Data block will be cached in BucketCache, so it should be an off-heap block.
908+
if (hfb.getBlockType().isData()) {
909+
Assert.assertTrue(hfb.isSharedMem());
910+
} else if (!l1CachePolicy.equals("TinyLfu")) {
911+
Assert.assertFalse(hfb.isSharedMem());
912+
}
913+
} finally {
914+
cachedBlock.release();
915+
}
916+
block.release(); // return back the ByteBuffer back to allocator.
917+
}
918+
reader.close();
919+
combined.shutdown();
920+
if (cachedBlock != null) {
921+
Assert.assertEquals(0, cachedBlock.refCnt());
922+
}
923+
Assert.assertEquals(bufCount, alloc.getFreeBufferCount());
924+
alloc.clean();
925+
}
926+
857927
}

0 commit comments

Comments
 (0)