|
67 | 67 | import org.apache.hadoop.hbase.io.hfile.Cacheable; |
68 | 68 | import org.apache.hadoop.hbase.io.hfile.CachedBlock; |
69 | 69 | import org.apache.hadoop.hbase.io.hfile.HFileBlock; |
| 70 | +import org.apache.hadoop.hbase.io.hfile.HFileContext; |
70 | 71 | import org.apache.hadoop.hbase.nio.ByteBuff; |
71 | 72 | import org.apache.hadoop.hbase.nio.RefCnt; |
72 | 73 | import org.apache.hadoop.hbase.protobuf.ProtobufMagic; |
| 74 | +import org.apache.hadoop.hbase.util.Bytes; |
73 | 75 | import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; |
74 | 76 | import org.apache.hadoop.hbase.util.IdReadWriteLock; |
75 | 77 | import org.apache.hadoop.hbase.util.IdReadWriteLockStrongRef; |
@@ -249,6 +251,10 @@ public class BucketCache implements BlockCache, HeapSize { |
249 | 251 | * */ |
250 | 252 | private String algorithm; |
251 | 253 |
|
| 254 | + /* Tracks failed Bucket Cache allocations, used to rate-limit allocation-failure logging. */ |
| 255 | + private long allocFailLogPrevTs; // time of previous log event for allocation failure. |
| 256 | + private static final int ALLOCATION_FAIL_LOG_TIME_PERIOD = 60000; // Default 1 minute. |
| 257 | + |
252 | 258 | public BucketCache(String ioEngineName, long capacity, int blockSize, int[] bucketSizes, |
253 | 259 | int writerThreadNum, int writerQLen, String persistencePath) throws IOException { |
254 | 260 | this(ioEngineName, capacity, blockSize, bucketSizes, writerThreadNum, writerQLen, |
@@ -291,6 +297,8 @@ public BucketCache(String ioEngineName, long capacity, int blockSize, int[] buck |
291 | 297 | this.blockSize = blockSize; |
292 | 298 | this.ioErrorsTolerationDuration = ioErrorsTolerationDuration; |
293 | 299 |
|
| 300 | + this.allocFailLogPrevTs = 0; |
| 301 | + |
294 | 302 | bucketAllocator = new BucketAllocator(capacity, bucketSizes); |
295 | 303 | for (int i = 0; i < writerThreads.length; ++i) { |
296 | 304 | writerQueues.add(new ArrayBlockingQueue<>(writerQLen)); |
@@ -728,7 +736,8 @@ public void logStats() { |
728 | 736 | (StringUtils.formatPercent(cacheStats.getHitCachingRatio(), 2)+ ", ")) + |
729 | 737 | "evictions=" + cacheStats.getEvictionCount() + ", " + |
730 | 738 | "evicted=" + cacheStats.getEvictedCount() + ", " + |
731 | | - "evictedPerRun=" + cacheStats.evictedPerEviction()); |
| 739 | + "evictedPerRun=" + cacheStats.evictedPerEviction() + ", " + |
| 740 | + "allocationFailCount=" + cacheStats.getAllocationFailCount()); |
732 | 741 | cacheStats.reset(); |
733 | 742 | } |
734 | 743 |
|
@@ -1036,7 +1045,29 @@ void doDrain(final List<RAMQueueEntry> entries) throws InterruptedException { |
1036 | 1045 | } |
1037 | 1046 | index++; |
1038 | 1047 | } catch (BucketAllocatorException fle) { |
1039 | | - LOG.warn("Failed allocation for " + (re == null ? "" : re.getKey()) + "; " + fle); |
| 1048 | + long currTs = System.currentTimeMillis(); // Current time since Epoch in milliseconds. |
| 1049 | + cacheStats.allocationFailed(); // Record the allocation failure in cache stats. |
| 1050 | + if (allocFailLogPrevTs == 0 || (currTs - allocFailLogPrevTs) > ALLOCATION_FAIL_LOG_TIME_PERIOD) { |
| 1051 | + String failHFileName = ""; |
| 1052 | + String failColumnFamily = ""; |
| 1053 | + String failTableName = ""; |
| 1054 | + if (re != null) { |
| 1055 | + Cacheable failData = re.getData(); |
| 1056 | + if (failData instanceof HFileBlock) { |
| 1057 | + HFileBlock failBlock = (HFileBlock) failData; |
| 1058 | + HFileContext failFileContext = failBlock.getHFileContext(); |
| 1059 | + failHFileName = failFileContext.getHFileName(); |
| 1060 | + failColumnFamily = Bytes.toString(failFileContext.getColumnFamily()); |
| 1061 | + failTableName = Bytes.toString(failFileContext.getTableName()); |
| 1062 | + } |
| 1063 | + } |
| 1064 | + LOG.warn("Most recent failed allocation in " + ALLOCATION_FAIL_LOG_TIME_PERIOD + |
| 1065 | + " milliseconds; Key: " + (re == null ? "" : re.getKey()) + |
| 1066 | + ", TableName = " + failTableName + ", ColumnFamily = " + failColumnFamily + |
| 1067 | + ", HFileName : " + failHFileName |
| 1068 | + , fle); |
| 1069 | + allocFailLogPrevTs = currTs; |
| 1070 | + } |
1040 | 1071 | // Presume can't add. Too big? Move index on. Entry will be cleared from ramCache below. |
1041 | 1072 | bucketEntries[index] = null; |
1042 | 1073 | index++; |
|
0 commit comments