Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.io.hfile.bucket.BucketCache;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
* CombinedBlockCache is an abstraction layer that combines {@link FirstLevelBlockCache} and
Expand All @@ -38,6 +40,8 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
protected final BlockCache l2Cache;
protected final CombinedCacheStats combinedCacheStats;

private static final Logger LOG = LoggerFactory.getLogger(CombinedBlockCache.class);

public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) {
this.l1Cache = l1Cache;
this.l2Cache = l2Cache;
Expand Down Expand Up @@ -77,16 +81,49 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
// We are not in a position to exactly look at LRU cache or BC as BlockType may not be getting
// passed always.
Cacheable block = null;
// We don't know the block type. We should try to get it on one of the caches only,
// but not both otherwise we'll over compute on misses. Here we check if the key is on L1,
// if so, call getBlock on L1 and that will compute the hit. Otherwise, we'll try to get it from
// L2 and whatever happens, we'll update the stats there.
boolean existInL1 = l1Cache.containsBlock(cacheKey);
if (!existInL1 && updateCacheMetrics && !repeat) {
// If the block does not exist in L1, the containsBlock should be counted as one miss.
l1Cache.getStats().miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
// if we know it's in L1, just delegate call to l1 and return it
if (existInL1) {
block = l1Cache.getBlock(cacheKey, caching, repeat, false);
} else {
block = l2Cache.getBlock(cacheKey, caching, repeat, false);
}
if (updateCacheMetrics) {
boolean metaBlock = isMetaBlock(cacheKey.getBlockType());
if (metaBlock) {
if (!existInL1 && block != null) {
LOG.warn("Cache key {} had block type {}, but was found in L2 cache.", cacheKey,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

How can this happen? Is this a bug?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this is just defensive logging? It should not happen normally. If we see this log, then there could be a bug?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, I decided to put this in because I saw a UT doing the opposite (creating a meta block but wrongly passing block type DATA in the cache key), so I wonder if this mistake might be happening anywhere in the "real" code. I tried to do some review of all callers of this method and haven't found any, but I could have missed something.

cacheKey.getBlockType());
updateBlockMetrics(block, cacheKey, l2Cache, caching);
} else {
updateBlockMetrics(block, cacheKey, l1Cache, caching);
}
} else {
if (existInL1) {
LOG.warn("Cache key {} had block type {}, but was found in L1 cache.", cacheKey,
cacheKey.getBlockType());
updateBlockMetrics(block, cacheKey, l1Cache, caching);
} else {
updateBlockMetrics(block, cacheKey, l2Cache, caching);
}
}
}
return block;
}

/**
 * Records a hit or a miss on the given cache's stats for this lookup: a miss when
 * {@code block} is null, a hit otherwise.
 */
private void updateBlockMetrics(Cacheable block, BlockCacheKey key, BlockCache cache,
  boolean caching) {
  if (block == null) {
    cache.getStats().miss(caching, key.isPrimary(), key.getBlockType());
  } else {
    cache.getStats().hit(caching, key.isPrimary(), key.getBlockType());
  }
}

@Override
Expand All @@ -95,7 +132,13 @@ public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repea
if (blockType == null) {
return getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}
boolean metaBlock = isMetaBlock(blockType);
cacheKey.setBlockType(blockType);
return getBlockWithType(cacheKey, caching, repeat, updateCacheMetrics);
}

private Cacheable getBlockWithType(BlockCacheKey cacheKey, boolean caching, boolean repeat,
boolean updateCacheMetrics) {
boolean metaBlock = isMetaBlock(cacheKey.getBlockType());
if (metaBlock) {
return l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
} else {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,12 +19,16 @@

import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_IOENGINE_KEY;
import static org.apache.hadoop.hbase.HConstants.BUCKET_CACHE_SIZE_KEY;
import static org.apache.hadoop.hbase.io.ByteBuffAllocator.HEAP;
import static org.junit.Assert.assertEquals;

import java.nio.ByteBuffer;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.CombinedBlockCache.CombinedCacheStats;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Assert;
import org.junit.ClassRule;
Expand Down Expand Up @@ -110,11 +114,53 @@ public void testCombinedCacheStats() {

@Test
public void testMultiThreadGetAndEvictBlock() throws Exception {
  // Reuse the shared LRU multi-thread get/evict scenario against a combined cache.
  TestLruBlockCache.testMultiThreadGetAndEvictBlockInternal(createCombinedBlockCache());
}

// DATA blocks live in L2, so the post-eviction miss must be charged to L2 (0 L1 / 1 L2).
@Test
public void testCombinedBlockCacheStatsWithDataBlockType() throws Exception {
testCombinedBlockCacheStats(BlockType.DATA, 0, 1);
}

// META blocks live in L1, so the post-eviction miss must be charged to L1 (1 L1 / 0 L2).
@Test
public void testCombinedBlockCacheStatsWithMetaBlockType() throws Exception {
testCombinedBlockCacheStats(BlockType.META, 1, 0);
}

// With no block type on the key, the combined cache falls back to L2 accounting (0 L1 / 1 L2).
@Test
public void testCombinedBlockCacheStatsWithNoBlockType() throws Exception {
testCombinedBlockCacheStats(null, 0, 1);
}

/**
 * Builds a {@link CombinedBlockCache} backed by an offheap bucket cache, asserting the
 * factory actually produced the combined flavor.
 */
private CombinedBlockCache createCombinedBlockCache() {
  Configuration conf = UTIL.getConfiguration();
  conf.set(BUCKET_CACHE_IOENGINE_KEY, "offheap");
  conf.setInt(BUCKET_CACHE_SIZE_KEY, 32);
  BlockCache blockCache = BlockCacheFactory.createBlockCache(conf);
  Assert.assertTrue(blockCache instanceof CombinedBlockCache);
  return (CombinedBlockCache) blockCache;
}

/**
 * Caches one block under a key carrying the given type, reads it back (a hit, no misses
 * recorded), evicts it, then reads again and asserts the resulting single miss is attributed
 * to the expected cache tier (L1 vs L2).
 */
public void testCombinedBlockCacheStats(BlockType type, int expectedL1Miss, int expectedL2Miss)
  throws Exception {
  CombinedBlockCache cache = createCombinedBlockCache();
  BlockCacheKey blockKey = new BlockCacheKey("key1", 0, false, type);
  int payload = 100;
  byte[] backing = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + payload];
  HFileContext context = new HFileContextBuilder().build();
  BlockType effectiveType = type != null ? type : BlockType.DATA;
  HFileBlock block = new HFileBlock(effectiveType, payload, payload, -1,
    ByteBuff.wrap(ByteBuffer.wrap(backing, 0, payload)), HFileBlock.FILL_HEADER, -1, 52, -1,
    context, HEAP);
  cache.cacheBlock(blockKey, block);
  // First read finds the block: overall miss count stays at zero.
  cache.getBlock(blockKey, true, false, true);
  assertEquals(0, cache.getStats().getMissCount());
  cache.evictBlock(blockKey);
  // Second read misses; exactly one tier should have recorded it.
  cache.getBlock(blockKey, true, false, true);
  assertEquals(1, cache.getStats().getMissCount());
  assertEquals(expectedL1Miss, cache.getFirstLevelCache().getStats().getMissCount());
  assertEquals(expectedL2Miss, cache.getSecondLevelCache().getStats().getMissCount());
}

}