/tmp/bucketcache.map.
*/
- public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY =
+ public static final String BUCKET_CACHE_PERSISTENT_PATH_KEY =
"hbase.bucketcache.persistent.path";
/**
@@ -98,11 +105,11 @@ public class CacheConfig {
* as indices and blooms are kept in the lru blockcache and the data blocks in the
* bucket cache).
*/
- public static final String BUCKET_CACHE_COMBINED_KEY =
+ public static final String BUCKET_CACHE_COMBINED_KEY =
"hbase.bucketcache.combinedcache.enabled";
public static final String BUCKET_CACHE_WRITER_THREADS_KEY = "hbase.bucketcache.writer.threads";
- public static final String BUCKET_CACHE_WRITER_QUEUE_KEY =
+ public static final String BUCKET_CACHE_WRITER_QUEUE_KEY =
"hbase.bucketcache.writer.queuelength";
/**
@@ -175,6 +182,7 @@ private static enum ExternalBlockCaches {
memcached("org.apache.hadoop.hbase.io.hfile.MemcachedBlockCache");
// TODO(eclark): Consider more. Redis, etc.
Class<? extends BlockCache> clazz;
+ @SuppressWarnings("unchecked")
ExternalBlockCaches(String clazzName) {
try {
clazz = (Class<? extends BlockCache>) Class.forName(clazzName);
@@ -507,7 +515,9 @@ public boolean shouldCacheDataCompressed() {
* @return true if this {@link BlockCategory} should be compressed in blockcache, false otherwise
*/
public boolean shouldCacheCompressed(BlockCategory category) {
- if (!isBlockCacheEnabled()) return false;
+ if (!isBlockCacheEnabled()) {
+ return false;
+ }
switch (category) {
case DATA:
return this.cacheDataCompressed;
@@ -609,27 +619,61 @@ public String toString() {
*/
// Clear this if in tests you'd make more than one block cache instance.
static BlockCache GLOBAL_BLOCK_CACHE_INSTANCE;
- private static LruBlockCache GLOBAL_L1_CACHE_INSTANCE = null;
- private static BlockCache GLOBAL_L2_CACHE_INSTANCE = null;
+ private static FirstLevelBlockCache GLOBAL_L1_CACHE_INSTANCE;
+ private static BlockCache GLOBAL_L2_CACHE_INSTANCE;
+ private static ForkJoinPool GLOBAL_FORKJOIN_POOL;
/** Boolean whether we have disabled the block cache entirely. */
static boolean blockCacheDisabled = false;
/**
- * @param c Configuration to use.
- * @return An L1 instance. Currently an instance of LruBlockCache.
+ * @param c Configuration to use
+ * @return An L1 instance
+ */
+ public static FirstLevelBlockCache getL1(final Configuration c) {
+ long xmx = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
+ long l1CacheSize = HeapMemorySizeUtil.getFirstLevelCacheSize(c, xmx);
+ return getL1(l1CacheSize, c);
+ }
+
+ /**
+ * @param c Configuration to use
+ * @param xmx Max heap memory
+ * @return An L1 instance
*/
- private static synchronized LruBlockCache getL1(final Configuration c) {
+
+ private synchronized static FirstLevelBlockCache getL1(long cacheSize, Configuration c) {
if (GLOBAL_L1_CACHE_INSTANCE != null) return GLOBAL_L1_CACHE_INSTANCE;
- final long lruCacheSize = HeapMemorySizeUtil.getLruCacheSize(c);
- if (lruCacheSize < 0) {
- blockCacheDisabled = true;
+ if (cacheSize < 0) {
+ return null;
}
- if (blockCacheDisabled) return null;
+ String policy = c.get(HFILE_BLOCK_CACHE_POLICY_KEY, HFILE_BLOCK_CACHE_POLICY_DEFAULT);
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
- LOG.info("Allocating LruBlockCache size=" +
- StringUtils.byteDesc(lruCacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
- GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(lruCacheSize, blockSize, true, c);
+ LOG.info("Allocating BlockCache size=" +
+ StringUtils.byteDesc(cacheSize) + ", blockSize=" + StringUtils.byteDesc(blockSize));
+ if (policy.equalsIgnoreCase("LRU")) {
+ GLOBAL_L1_CACHE_INSTANCE = new LruBlockCache(cacheSize, blockSize, true, c);
+ } else if (policy.equalsIgnoreCase("TinyLFU")) {
+ if (GLOBAL_FORKJOIN_POOL == null) {
+ GLOBAL_FORKJOIN_POOL = new ForkJoinPool();
+ }
+ Class<?> tinyLFUClass;
+ try {
+ tinyLFUClass = Class.forName("org.apache.hadoop.hbase.io.hfile.TinyLfuBlockCache");
+ GLOBAL_L1_CACHE_INSTANCE = (FirstLevelBlockCache)
+ tinyLFUClass.getDeclaredConstructor(long.class, long.class, Executor.class,
+ Configuration.class)
+ .newInstance(cacheSize, blockSize, GLOBAL_FORKJOIN_POOL, c);
+ } catch (Exception e) {
+ throw new RuntimeException(
+ "Unable to instantiate the TinyLfuBlockCache block cache policy." +
+ "If you want to use TinyLFU you must build with JDK8+, run with JRE8+, and have both " +
+ "the hbase-tinylfu-blockcache module and its dependency the caffiene library " +
+ "installed into the classpath.", e);
+ }
+ } else {
+ throw new IllegalArgumentException("Unknown block cache policy " + policy);
+ }
return GLOBAL_L1_CACHE_INSTANCE;
}
@@ -669,7 +713,7 @@ public CacheStats getL2Stats() {
}
private static BlockCache getExternalBlockcache(Configuration c) {
- Class klass = null;
+ Class<?> klass = null;
// Get the class, from the config. s
try {
@@ -697,7 +741,9 @@ private static BlockCache getExternalBlockcache(Configuration c) {
private static BlockCache getBucketCache(Configuration c) {
// Check for L2. ioengine name must be non-null.
String bucketCacheIOEngineName = c.get(BUCKET_CACHE_IOENGINE_KEY, null);
- if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) return null;
+ if (bucketCacheIOEngineName == null || bucketCacheIOEngineName.length() <= 0) {
+ return null;
+ }
int blockSize = c.getInt(BLOCKCACHE_BLOCKSIZE_KEY, HConstants.DEFAULT_BLOCKSIZE);
final long bucketCacheSize = HeapMemorySizeUtil.getBucketCacheSize(c);
@@ -755,33 +801,35 @@ private static BlockCache getBucketCache(Configuration c) {
* @return The block cache or null.
*/
public static synchronized BlockCache instantiateBlockCache(Configuration conf) {
- if (GLOBAL_BLOCK_CACHE_INSTANCE != null) return GLOBAL_BLOCK_CACHE_INSTANCE;
- if (blockCacheDisabled) return null;
+ if (GLOBAL_BLOCK_CACHE_INSTANCE != null) {
+ return GLOBAL_BLOCK_CACHE_INSTANCE;
+ }
+ if (blockCacheDisabled) {
+ return null;
+ }
if (conf.get(DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY) != null) {
LOG.warn("The config key " + DEPRECATED_BLOCKCACHE_BLOCKSIZE_KEY +
" is deprecated now, instead please use " + BLOCKCACHE_BLOCKSIZE_KEY +". "
+ "In future release we will remove the deprecated config.");
}
- LruBlockCache l1 = getL1(conf);
- // blockCacheDisabled is set as a side-effect of getL1Internal(), so check it again after the call.
- if (blockCacheDisabled) return null;
+ FirstLevelBlockCache l1 = getL1(conf);
BlockCache l2 = getL2(conf);
if (l2 == null) {
GLOBAL_BLOCK_CACHE_INSTANCE = l1;
} else {
boolean useExternal = conf.getBoolean(EXTERNAL_BLOCKCACHE_KEY, EXTERNAL_BLOCKCACHE_DEFAULT);
- boolean combinedWithLru = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
+ boolean combinedWithL1 = conf.getBoolean(BUCKET_CACHE_COMBINED_KEY,
DEFAULT_BUCKET_CACHE_COMBINED);
if (useExternal) {
GLOBAL_BLOCK_CACHE_INSTANCE = new InclusiveCombinedBlockCache(l1, l2);
} else {
- if (combinedWithLru) {
+ if (combinedWithL1) {
GLOBAL_BLOCK_CACHE_INSTANCE = new CombinedBlockCache(l1, l2);
} else {
- // L1 and L2 are not 'combined'. They are connected via the LruBlockCache victimhandler
- // mechanism. It is a little ugly but works according to the following: when the
- // background eviction thread runs, blocks evicted from L1 will go to L2 AND when we get
- // a block from the L1 cache, if not in L1, we will search L2.
+ // L1 and L2 are not 'combined'. They are connected via the FirstLevelBlockCache
+ // victimhandler mechanism. It is a little ugly but works according to the following:
+ // when the background eviction thread runs, blocks evicted from L1 will go to L2 AND when
+ // we get a block from the L1 cache, if not in L1, we will search L2.
GLOBAL_BLOCK_CACHE_INSTANCE = l1;
l1.setVictimCache(l2);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
index 562e874f82cc..3dbec8ca7cae 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/CombinedBlockCache.java
@@ -28,24 +28,24 @@
/**
* CombinedBlockCache is an abstraction layer that combines
- * {@link LruBlockCache} and {@link BucketCache}. The smaller lruCache is used
+ * {@link FirstLevelBlockCache} and {@link BucketCache}. The smaller lruCache is used
* to cache bloom blocks and index blocks. The larger l2Cache is used to
* cache data blocks. {@link #getBlock(BlockCacheKey, boolean, boolean, boolean)} reads
- * first from the smaller lruCache before looking for the block in the l2Cache. Blocks evicted
- * from lruCache are put into the bucket cache.
+ * first from the smaller l1Cache before looking for the block in the l2Cache. Blocks evicted
+ * from l1Cache are put into the bucket cache.
* Metrics are the combined size and hits and misses of both caches.
- *
+ *
*/
@InterfaceAudience.Private
public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
- protected final LruBlockCache lruCache;
+ protected final FirstLevelBlockCache l1Cache;
protected final BlockCache l2Cache;
protected final CombinedCacheStats combinedCacheStats;
- public CombinedBlockCache(LruBlockCache lruCache, BlockCache l2Cache) {
- this.lruCache = lruCache;
+ public CombinedBlockCache(FirstLevelBlockCache l1Cache, BlockCache l2Cache) {
+ this.l1Cache = l1Cache;
this.l2Cache = l2Cache;
- this.combinedCacheStats = new CombinedCacheStats(lruCache.getStats(),
+ this.combinedCacheStats = new CombinedCacheStats(l1Cache.getStats(),
l2Cache.getStats());
}
@@ -55,7 +55,7 @@ public long heapSize() {
if (l2Cache instanceof HeapSize) {
l2size = ((HeapSize) l2Cache).heapSize();
}
- return lruCache.heapSize() + l2size;
+ return l1Cache.heapSize() + l2size;
}
@Override
@@ -63,7 +63,7 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf, boolean inMemory,
final boolean cacheDataInL1) {
boolean metaBlock = buf.getBlockType().getCategory() != BlockCategory.DATA;
if (metaBlock || cacheDataInL1) {
- lruCache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
+ l1Cache.cacheBlock(cacheKey, buf, inMemory, cacheDataInL1);
} else {
l2Cache.cacheBlock(cacheKey, buf, inMemory, false);
}
@@ -77,25 +77,25 @@ public void cacheBlock(BlockCacheKey cacheKey, Cacheable buf) {
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching,
boolean repeat, boolean updateCacheMetrics) {
- boolean existInL1 = lruCache.containsBlock(cacheKey);
+ boolean existInL1 = l1Cache.containsBlock(cacheKey);
if (!existInL1 && updateCacheMetrics && !repeat) {
// If the block does not exist in L1, this check should be counted as a miss.
combinedCacheStats.lruCacheStats
.miss(caching, cacheKey.isPrimary(), cacheKey.getBlockType());
}
return existInL1 ?
- lruCache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
+ l1Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics):
l2Cache.getBlock(cacheKey, caching, repeat, updateCacheMetrics);
}
@Override
public boolean evictBlock(BlockCacheKey cacheKey) {
- return lruCache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
+ return l1Cache.evictBlock(cacheKey) || l2Cache.evictBlock(cacheKey);
}
@Override
public int evictBlocksByHfileName(String hfileName) {
- return lruCache.evictBlocksByHfileName(hfileName)
+ return l1Cache.evictBlocksByHfileName(hfileName)
+ l2Cache.evictBlocksByHfileName(hfileName);
}
@@ -106,43 +106,43 @@ public CacheStats getStats() {
@Override
public void shutdown() {
- lruCache.shutdown();
+ l1Cache.shutdown();
l2Cache.shutdown();
}
@Override
public long size() {
- return lruCache.size() + l2Cache.size();
+ return l1Cache.size() + l2Cache.size();
}
@Override
public long getMaxSize() {
- return lruCache.getMaxSize() + l2Cache.getMaxSize();
+ return l1Cache.getMaxSize() + l2Cache.getMaxSize();
}
@Override
public long getFreeSize() {
- return lruCache.getFreeSize() + l2Cache.getFreeSize();
+ return l1Cache.getFreeSize() + l2Cache.getFreeSize();
}
@Override
public long getCurrentSize() {
- return lruCache.getCurrentSize() + l2Cache.getCurrentSize();
+ return l1Cache.getCurrentSize() + l2Cache.getCurrentSize();
}
@Override
public long getCurrentDataSize() {
- return lruCache.getCurrentDataSize() + l2Cache.getCurrentDataSize();
+ return l1Cache.getCurrentDataSize() + l2Cache.getCurrentDataSize();
}
@Override
public long getBlockCount() {
- return lruCache.getBlockCount() + l2Cache.getBlockCount();
+ return l1Cache.getBlockCount() + l2Cache.getBlockCount();
}
@Override
public long getDataBlockCount() {
- return lruCache.getDataBlockCount() + l2Cache.getDataBlockCount();
+ return l1Cache.getDataBlockCount() + l2Cache.getDataBlockCount();
}
public static class CombinedCacheStats extends CacheStats {
@@ -327,7 +327,7 @@ public void rollMetricsPeriod() {
lruCacheStats.rollMetricsPeriod();
bucketCacheStats.rollMetricsPeriod();
}
-
+
@Override
public long getFailedInserts() {
return lruCacheStats.getFailedInserts() + bucketCacheStats.getFailedInserts();
@@ -338,13 +338,13 @@ public long getSumHitCountsPastNPeriods() {
return lruCacheStats.getSumHitCountsPastNPeriods()
+ bucketCacheStats.getSumHitCountsPastNPeriods();
}
-
+
@Override
public long getSumRequestCountsPastNPeriods() {
return lruCacheStats.getSumRequestCountsPastNPeriods()
+ bucketCacheStats.getSumRequestCountsPastNPeriods();
}
-
+
@Override
public long getSumHitCachingCountsPastNPeriods() {
return lruCacheStats.getSumHitCachingCountsPastNPeriods()
@@ -365,11 +365,11 @@ public Iterator<CachedBlock> iterator() {
 * BucketCache can be used as mainly a block cache (see
 * {@link org.apache.hadoop.hbase.io.hfile.CombinedBlockCache}), combined with
- * combined with LruBlockCache to decrease CMS GC and heap fragmentation.
+ * a BlockCache to decrease CMS GC and heap fragmentation.
 *
 *
It also can be used as a secondary cache (e.g. using a file on ssd/fusionio to store
- * blocks) to enlarge cache space via
- * {@link org.apache.hadoop.hbase.io.hfile.LruBlockCache#setVictimCache}
+ * blocks) to enlarge cache space via a victim cache.
*/
@InterfaceAudience.Private
public class BucketCache implements BlockCache, HeapSize {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
index b1cc58cae5f4..061ed4f5d279 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/io/hfile/TestCacheConfig.java
@@ -24,7 +24,6 @@
import java.io.IOException;
import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryUsage;
import java.nio.ByteBuffer;
import java.util.Map;
@@ -253,7 +252,8 @@ private void doBucketCacheConfigTest() {
BlockCache [] bcs = cbc.getBlockCaches();
assertTrue(bcs[0] instanceof LruBlockCache);
LruBlockCache lbc = (LruBlockCache)bcs[0];
- assertEquals(HeapMemorySizeUtil.getLruCacheSize(this.conf), lbc.getMaxSize());
+ assertEquals(HeapMemorySizeUtil.getFirstLevelCacheSize(this.conf,
+ ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax()), lbc.getMaxSize());
assertTrue(bcs[1] instanceof BucketCache);
BucketCache bc = (BucketCache)bcs[1];
// getMaxSize comes back in bytes but we specified size in MB
@@ -270,7 +270,8 @@ public void testBucketCacheConfigL1L2Setup() {
// Make lru size is smaller than bcSize for sure. Need this to be true so when eviction
// from L1 happens, it does not fail because L2 can't take the eviction because block too big.
this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.001f);
- long lruExpectedSize = HeapMemorySizeUtil.getLruCacheSize(this.conf);
+ long lruExpectedSize = HeapMemorySizeUtil.getFirstLevelCacheSize(this.conf,
+ ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax());
final int bcSize = 100;
long bcExpectedSize = 100 * 1024 * 1024; // MB.
assertTrue(lruExpectedSize < bcExpectedSize);
diff --git a/hbase-tinylfu-blockcache/pom.xml b/hbase-tinylfu-blockcache/pom.xml
new file mode 100644
index 000000000000..d93aedf24410
--- /dev/null
+++ b/hbase-tinylfu-blockcache/pom.xml
@@ -0,0 +1,405 @@
+
+