diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
index cfaceb3219bc..d169a9029ce6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/CompactSplit.java
@@ -420,7 +420,13 @@ private Optional<CompactionContext> selectCompaction(HRegion region, HStore stor
     throws IOException {
     // don't even select for compaction if disableCompactions is set to true
     if (!isCompactionsEnabled()) {
-      LOG.info(String.format("User has disabled compactions"));
+      LOG.info("User has disabled compactions");
+      return Optional.empty();
+    }
+    if (store.isCompacting()) {
+      // There is only one Compactor instance for a given store. We cannot concurrently compact
+      // the store.
+      LOG.debug("Store is already compacting");
       return Optional.empty();
     }
     Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
index 41f18382073a..e74ade457e03 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HStore.java
@@ -156,8 +156,6 @@ public class HStore implements Store, HeapSize, StoreConfigInformation,
   // rows that has cells from both memstore and files (or only files)
   private LongAdder mixedRowReadsCount = new LongAdder();
 
-  private boolean cacheOnWriteLogged;
-
   /**
    * Lock specific to archiving compacted store files. This avoids races around
    * the combination of retrieving the list of compacted files and moving them to
@@ -290,7 +288,6 @@ protected HStore(final HRegion region, final ColumnFamilyDescriptor family,
       this, memstore.getClass().getSimpleName(), policyName, verifyBulkLoads,
       parallelPutCountPrintThreshold, family.getDataBlockEncoding(),
       family.getCompressionType());
-    cacheOnWriteLogged = false;
   }
 
   private StoreContext initializeStoreContext(ColumnFamilyDescriptor family) throws IOException {
@@ -578,6 +575,13 @@ public Collection<HStoreFile> getCompactedFiles() {
     return this.storeEngine.getStoreFileManager().getCompactedfiles();
   }
 
+  @Override
+  public boolean isCompacting() {
+    synchronized (filesCompacting) {
+      return !filesCompacting.isEmpty();
+    }
+  }
+
   /**
    * This throws a WrongRegionException if the HFile does not fit in this region, or an
    * InvalidHFileException if the HFile is not valid.
@@ -2404,4 +2408,5 @@ void updateMetricsStore(boolean memstoreRead) {
       mixedRowReadsCount.increment();
     }
   }
+
 }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
index fd1acd9a1363..05d739917848 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Store.java
@@ -54,6 +54,8 @@
 
   Collection<? extends StoreFile> getCompactedFiles();
 
+  boolean isCompacting();
+
   /**
    * When was the last edit done in the memstore
    */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index d934ecb0c16d..4123ada11170 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -355,10 +355,12 @@ protected final List<Path> compact(final CompactionRequestImpl request,
       smallestReadPoint = Math.min(fd.minSeqIdToKeep, smallestReadPoint);
       cleanSeqId = true;
     }
-    if (writer != null){
-      LOG.warn("Writer exists when it should not: " + getCompactionTargets().stream()
+    if (writer != null) {
+      String message = "Writer exists when it should not: " + getCompactionTargets().stream()
         .map(n -> n.toString())
-        .collect(Collectors.joining(", ", "{ ", " }")));
+        .collect(Collectors.joining(", ", "{ ", " }"));
+      LOG.error(message);
+      throw new IllegalStateException(message);
     }
     writer = sinkFactory.createWriter(scanner, fd, dropCache, request.isMajor());
     finished = performCompaction(fd, scanner, smallestReadPoint, cleanSeqId,
@@ -564,7 +566,7 @@ public List<Path> getCompactionTargets() {
   /**
    * Reset the Writer when the new storefiles were successfully added
    */
-  public void resetWriter(){
+  public void resetWriter() {
     writer = null;
   }
 }
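
Note on the new guard: HStore.isCompacting() reads filesCompacting under that
list's own monitor, presumably matching how HStore synchronizes other accesses
to filesCompacting. A minimal standalone sketch of the idiom follows; the
StoreLike class name and the Object element type are illustrative only, not
part of the patch:

import java.util.ArrayList;
import java.util.List;

class StoreLike {
  // Files currently selected for compaction; only read or mutated while
  // holding the list's own monitor, as the patch does in HStore.
  private final List<Object> filesCompacting = new ArrayList<>();

  boolean isCompacting() {
    synchronized (filesCompacting) {
      // A non-empty list means a compaction has selected files and has not
      // yet committed or aborted, so a new selection should be skipped.
      return !filesCompacting.isEmpty();
    }
  }
}

CompactSplit.selectCompaction() consults this check before calling
store.requestCompaction(), so a store with an in-flight compaction yields
Optional.empty() instead of queueing a second request against the store's
single Compactor instance.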