diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java index 0f54fafba954..c8c21e0625c6 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileWriterImpl.java @@ -556,9 +556,9 @@ private void writeInlineBlocks(boolean closing) throws IOException { private void doCacheOnWrite(long offset) { cacheConf.getBlockCache().ifPresent(cache -> { HFileBlock cacheFormatBlock = blockWriter.getBlockForCaching(cacheConf); + BlockCacheKey key = buildCacheBlockKey(offset, cacheFormatBlock.getBlockType()); try { - cache.cacheBlock(new BlockCacheKey(name, offset, true, cacheFormatBlock.getBlockType()), - cacheFormatBlock, cacheConf.isInMemory(), true); + cache.cacheBlock(key, cacheFormatBlock, cacheConf.isInMemory(), true); } finally { // refCnt will auto increase when block add to Cache, see RAMCache#putIfAbsent cacheFormatBlock.release(); @@ -566,6 +566,13 @@ private void doCacheOnWrite(long offset) { }); } + private BlockCacheKey buildCacheBlockKey(long offset, BlockType blockType) { + if (path != null) { + return new BlockCacheKey(path, offset, true, blockType); + } + return new BlockCacheKey(name, offset, true, blockType); + } + /** * Ready a new block for writing. 
*/ diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java index 34c8f07f9203..8951b1b72412 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestCacheEviction.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.regionserver.HStoreFile; +import org.apache.hadoop.hbase.regionserver.storefiletracker.StoreFileTrackerFactory; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.Bytes; @@ -67,6 +68,7 @@ public static void setUp() throws Exception { UTIL.getConfiguration().setBoolean(PREFETCH_BLOCKS_ON_OPEN_KEY, true); UTIL.getConfiguration().set(BUCKET_CACHE_IOENGINE_KEY, "offheap"); UTIL.getConfiguration().setInt(BUCKET_CACHE_SIZE_KEY, 200); + UTIL.getConfiguration().set(StoreFileTrackerFactory.TRACKER_IMPL, "FILE"); } @Test @@ -103,7 +105,7 @@ private void doTestEvictOnSplit(String table, boolean evictOnSplit, UTIL.startMiniCluster(1); try { TableName tableName = TableName.valueOf(table); - createAndCacheTable(tableName); + createTable(tableName, true); Collection<HStoreFile> files = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles(); checkCacheForBlocks(tableName, files, predicateBeforeSplit); @@ -125,7 +127,7 @@ private void doTestEvictOnClose(String table, boolean evictOnClose, UTIL.startMiniCluster(1); try { TableName tableName = TableName.valueOf(table); - createAndCacheTable(tableName); + createTable(tableName, true); Collection<HStoreFile> files = UTIL.getMiniHBaseCluster().getRegions(tableName).get(0).getStores().get(0).getStorefiles(); checkCacheForBlocks(tableName, files, predicateBeforeClose); @@ -139,7 +141,8 @@ private
void doTestEvictOnClose(String table, boolean evictOnClose, } } - private void createAndCacheTable(TableName tableName) throws IOException, InterruptedException { + private void createTable(TableName tableName, boolean shouldFlushTable) + throws IOException, InterruptedException { byte[] family = Bytes.toBytes("CF"); TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) .setColumnFamily(ColumnFamilyDescriptorBuilder.of(family)).build(); @@ -153,7 +156,10 @@ private void createAndCacheTable(TableName tableName) throws IOException, Interr puts.add(p); } tbl.put(puts); - UTIL.getAdmin().flush(tableName); + if (shouldFlushTable) { + UTIL.getAdmin().flush(tableName); + Thread.sleep(5000); + } } private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> files, @@ -167,4 +173,44 @@ private void checkCacheForBlocks(TableName tableName, Collection<HStoreFile> fil }); }); } + + @Test + public void testNoCacheWithoutFlush() throws Exception { + UTIL.startMiniCluster(1); + try { + TableName tableName = TableName.valueOf("tableNoCache"); + createTable(tableName, false); + checkRegionCached(tableName, false); + } finally { + UTIL.shutdownMiniCluster(); + } + } + + @Test + public void testCacheWithFlush() throws Exception { + UTIL.startMiniCluster(1); + try { + TableName tableName = TableName.valueOf("tableWithFlush"); + createTable(tableName, true); + checkRegionCached(tableName, true); + } finally { + UTIL.shutdownMiniCluster(); + } + } + + private void checkRegionCached(TableName tableName, boolean isCached) throws IOException { + UTIL.getMiniHBaseCluster().getRegions(tableName).forEach(r -> { + try { + UTIL.getMiniHBaseCluster().getClusterMetrics().getLiveServerMetrics().forEach((sn, sm) -> { + for (Map.Entry<byte[], RegionMetrics> rm : sm.getRegionMetrics().entrySet()) { + if (rm.getValue().getNameAsString().equals(r.getRegionInfo().getRegionNameAsString())) { + assertTrue(isCached == (rm.getValue().getCurrentRegionCachedRatio() > 0.0f)); + } + } + }); + } catch (IOException e) { + throw new
RuntimeException(e); + } + }); + } }