diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
index a488185591ce..eccd9f812c33 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/hadoopbackport/ThrottledInputStream.java
@@ -28,6 +28,7 @@
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
 
 /**
  * The ThrottleInputStream provides bandwidth throttling on a specified
@@ -143,14 +144,10 @@ static long calSleepTimeMs(long bytesRead, long maxBytesPerSec, long elapsed) {
     }
   }
 
-  private void throttle() throws InterruptedIOException {
+  private void throttle() {
     long sleepTime = calSleepTimeMs();
     totalSleepTime += sleepTime;
-    try {
-      TimeUnit.MILLISECONDS.sleep(sleepTime);
-    } catch (InterruptedException e) {
-      throw new InterruptedIOException("Thread aborted");
-    }
+    Uninterruptibles.sleepUninterruptibly(sleepTime, TimeUnit.MILLISECONDS);
   }
 
   /**
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java
index a2f09d08170d..68ad5f8912aa 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/RetryCounter.java
@@ -26,6 +26,7 @@
 import org.slf4j.LoggerFactory;
 
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.Uninterruptibles;
 
 /**
  * Operation retry accounting.
@@ -177,11 +178,11 @@ public int getMaxAttempts() {
   /**
    * Sleep for a back off time as supplied by the backoff policy, and increases the attempts
    */
-  public void sleepUntilNextRetry() throws InterruptedException {
+  public void sleepUntilNextRetry() {
     int attempts = getAttemptTimes();
     long sleepTime = getBackoffTime();
     LOG.trace("Sleeping {} ms before retry #{}...", sleepTime, attempts);
-    retryConfig.getTimeUnit().sleep(sleepTime);
+    Uninterruptibles.sleepUninterruptibly(sleepTime, retryConfig.getTimeUnit());
     useRetry();
   }
 
diff --git a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestRetryCounter.java b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestRetryCounter.java
index c0706b74c94e..6991d29575dd 100644
--- a/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestRetryCounter.java
+++ b/hbase-common/src/test/java/org/apache/hadoop/hbase/util/TestRetryCounter.java
@@ -40,7 +40,7 @@ public class TestRetryCounter {
   private static final Logger LOG = LoggerFactory.getLogger(TestRetryCounter.class);
 
   @Test
-  public void testBasics() throws InterruptedException {
+  public void testBasics() {
     int maxAttempts = 10;
     RetryCounterFactory factory = new RetryCounterFactory(maxAttempts, 10, 1000);
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
index 122fad5a0a90..be483672ec00 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
@@ -362,12 +362,7 @@ private Pair execWithRetries(String hostname, ServiceType servi
       } catch (IOException e) {
         retryOrThrow(retryCounter, e, hostname, cmd);
       }
-      try {
-        retryCounter.sleepUntilNextRetry();
-      } catch (InterruptedException ex) {
-        // ignore
-        LOG.warn("Sleep Interrupted:", ex);
-      }
+      retryCounter.sleepUntilNextRetry();
     }
   }
 
@@ -407,12 +402,7 @@ public Pair execSudoWithRetries(String hostname, long timeout,
       } catch (IOException e) {
         retryOrThrow(retryCounter, e, hostname, cmd);
       }
-      try {
-        retryCounter.sleepUntilNextRetry();
-      } catch (InterruptedException ex) {
-        // ignore
-        LOG.warn("Sleep Interrupted:", ex);
-      }
+      retryCounter.sleepUntilNextRetry();
     }
   }
 
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java
index 10954ce164b8..ec53ea10b01f 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/RESTApiClusterManager.java
@@ -513,11 +513,7 @@ private T executeWithRetries(final Callable callable) {
           throw new RuntimeException("retries exhausted", e);
         }
       }
-      try {
-        retryCounter.sleepUntilNextRetry();
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      }
+      retryCounter.sleepUntilNextRetry();
     }
   }
 
@@ -535,11 +531,7 @@ private void waitFor(final Callable predicate) {
           throw new RuntimeException("retries exhausted", e);
         }
       }
-      try {
-        retryCounter.sleepUntilNextRetry();
-      } catch (InterruptedException e) {
-        throw new RuntimeException(e);
-      }
+      retryCounter.sleepUntilNextRetry();
     }
   }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
index f4e91b56051d..48f61036b78b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MetaRegionLocationCache.java
@@ -112,13 +112,7 @@ private void loadMetaLocationsFromZk(RetryCounter retryCounter, ZNodeOpType opTy
           watcher.abort("Error populating meta locations", ke);
           return;
         }
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          LOG.error("Interrupted while loading meta locations from ZK", ie);
-          Thread.currentThread().interrupt();
-          return;
-        }
+        retryCounter.sleepUntilNextRetry();
       }
     }
     if (znodes == null || znodes.isEmpty()) {
@@ -182,12 +176,7 @@ private void updateMetaLocation(String path, ZNodeOpType opType) {
           LOG.warn("Error getting meta location for path {}. Retries exhausted.", path, e);
           break;
         }
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          return;
-        }
+        retryCounter.sleepUntilNextRetry();
       }
     }
     if (location == null) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
index cc798cc2443f..277940eae91c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/RegionReplicaFlushHandler.java
@@ -130,11 +130,7 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException {
           ServerRegionReplicaUtil.getRegionInfoForDefaultReplica(region.getRegionInfo())
             .getRegionNameAsString(),
           region.getRegionInfo().getRegionNameAsString(), counter.getAttemptTimes(), e);
-        try {
-          counter.sleepUntilNextRetry();
-        } catch (InterruptedException e1) {
-          throw new InterruptedIOException(e1.getMessage());
-        }
+        counter.sleepUntilNextRetry();
         continue;
       }
 
@@ -188,11 +184,7 @@ void triggerFlushInPrimaryRegion(final HRegion region) throws IOException {
           break;
         }
       }
-      try {
-        counter.sleepUntilNextRetry();
-      } catch (InterruptedException e) {
-        throw new InterruptedIOException(e.getMessage());
-      }
+      counter.sleepUntilNextRetry();
     }
     region.setReadsEnabled(true);
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
index 6e090770b756..068db312e8fe 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/replication/regionserver/RegionReplicaReplicationEndpoint.java
@@ -281,13 +281,7 @@ public boolean replicate(ReplicateContext replicateContext) {
           if (!retryCounter.shouldRetry()) {
             return false;
           }
-          try {
-            retryCounter.sleepUntilNextRetry();
-          } catch (InterruptedException e1) {
-            // restore the interrupted state
-            Thread.currentThread().interrupt();
-            return false;
-          }
+          retryCounter.sleepUntilNextRetry();
           continue outer;
         }
         if (!requiresReplication(tableDesc, entry)) {
@@ -372,13 +366,7 @@ public boolean replicate(ReplicateContext replicateContext) {
         if (!retryCounter.shouldRetry()) {
           return false;
         }
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException e) {
-          // restore the interrupted state
-          Thread.currentThread().interrupt();
-          return false;
-        }
+        retryCounter.sleepUntilNextRetry();
       }
     }
 
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index 39da91ba5b0b..3e2e48d96f07 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -457,14 +457,8 @@ private FSDataOutputStream createFileWithRetries(final FileSystem fs,
           + retryCounter.getMaxAttempts());
         LOG.debug("Failed to create lock file " + hbckLockFilePath.getName(),
           ioe);
-        try {
-          exception = ioe;
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          throw (InterruptedIOException) new InterruptedIOException(
-            "Can't create lock file " + hbckLockFilePath.getName())
-            .initCause(ie);
-        }
+        exception = ioe;
+        retryCounter.sleepUntilNextRetry();
       }
     } while (retryCounter.shouldRetry());
 
@@ -518,14 +512,7 @@ private void unlockHbck() {
           + (retryCounter.getAttemptTimes() + 1) + " of "
          + retryCounter.getMaxAttempts());
         LOG.debug("Failed to delete " + HBCK_LOCK_PATH, ioe);
-        try {
-          retryCounter.sleepUntilNextRetry();
-        } catch (InterruptedException ie) {
-          Thread.currentThread().interrupt();
-          LOG.warn("Interrupted while deleting lock file" +
-            HBCK_LOCK_PATH);
-          return;
-        }
+        retryCounter.sleepUntilNextRetry();
       }
     } while (retryCounter.shouldRetry());
   }
@@ -771,12 +758,7 @@ private boolean setMasterInMaintenanceMode() throws IOException {
       LOG.warn("Fail to create znode " + hbckEphemeralNodePath + ", try="
         + (retryCounter.getAttemptTimes() + 1) + " of "
         + retryCounter.getMaxAttempts());
-      try {
-        retryCounter.sleepUntilNextRetry();
-      } catch (InterruptedException ie) {
-        throw (InterruptedIOException) new InterruptedIOException(
-          "Can't create znode " + hbckEphemeralNodePath).initCause(ie);
-      }
+      retryCounter.sleepUntilNextRetry();
     } while (retryCounter.shouldRetry());
     return hbckZodeCreated;
   }
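
Note (not part of the patch): every hunk above replaces an interruptible sleep, plus its
per-call-site try/catch, with Guava's Uninterruptibles.sleepUninterruptibly, which keeps
sleeping through interrupts and re-asserts the thread's interrupt flag before returning.
The sketch below is a minimal, self-contained illustration of that behavior and of the
simplified caller shape; the class name, the local sleepUninterruptibly helper, and the
back-off values are hypothetical and are not code from HBase or the Guava library.

import java.util.concurrent.TimeUnit;

public class UninterruptibleSleepSketch {

  // Keep sleeping through interrupts, then restore the thread's interrupt status,
  // mirroring the documented behavior of Uninterruptibles.sleepUninterruptibly.
  static void sleepUninterruptibly(long sleepFor, TimeUnit unit) {
    boolean interrupted = false;
    try {
      long remainingNanos = unit.toNanos(sleepFor);
      long end = System.nanoTime() + remainingNanos;
      while (true) {
        try {
          TimeUnit.NANOSECONDS.sleep(remainingNanos);
          return;
        } catch (InterruptedException e) {
          // Swallow the interrupt for now, remember it, and finish the remaining sleep.
          interrupted = true;
          remainingNanos = end - System.nanoTime();
        }
      }
    } finally {
      if (interrupted) {
        // Re-assert the interrupt so later blocking calls still observe it.
        Thread.currentThread().interrupt();
      }
    }
  }

  public static void main(String[] args) {
    // Caller shape after the patch: a bare back-off sleep with no try/catch and no
    // InterruptedException or InterruptedIOException plumbing at the call site.
    for (int attempt = 1; attempt <= 3; attempt++) {
      System.out.println("attempt " + attempt + ", backing off");
      sleepUninterruptibly(100L * attempt, TimeUnit.MILLISECONDS);
    }
  }
}

This is why call sites such as HBaseClusterManager, RESTApiClusterManager, and HBaseFsck can
drop their catch blocks: the interrupt is not lost, it is deferred onto the thread's interrupt
status instead of surfacing as a checked exception in the middle of a retry loop.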