diff --git a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java index cbb728ab5f1d..6c3ee02abb83 100644 --- a/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java +++ b/hbase-backup/src/main/java/org/apache/hadoop/hbase/backup/regionserver/LogRollBackupSubprocedurePool.java @@ -32,7 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -62,10 +62,9 @@ public LogRollBackupSubprocedurePool(String name, Configuration conf) { LogRollRegionServerProcedureManager.BACKUP_TIMEOUT_MILLIS_DEFAULT); int threads = conf.getInt(CONCURENT_BACKUP_TASKS_KEY, DEFAULT_CONCURRENT_BACKUP_TASKS); this.name = name; - executor = - new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(), - Threads.newDaemonThreadFactory("rs(" + name + ")-backup")); + executor = new ThreadPoolExecutor(1, threads, keepAlive, TimeUnit.SECONDS, + new LinkedBlockingQueue<>(), + new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-backup-pool-%d").build()); taskPool = new ExecutorCompletionService<>(executor); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 0f12e90ebe20..ebaa9725ef12 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -52,8 +52,8 @@ import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.util.ConcurrentMapUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +76,8 @@ class AsyncConnectionImpl implements AsyncConnection { @VisibleForTesting static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer( - Threads.newDaemonThreadFactory("Async-Client-Retry-Timer"), 10, TimeUnit.MILLISECONDS); + new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").build(), 10, + TimeUnit.MILLISECONDS); private static final String RESOLVE_HOSTNAME_ON_FAIL_KEY = "hbase.resolve.hostnames.on.failure"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index ccdfec7f4f6f..553148b32dd0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Threads; 
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -178,8 +178,9 @@ public boolean isDeadServer(ServerName sn) { @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) class MulticastListener implements Listener { private DatagramChannel channel; - private final EventLoopGroup group = new NioEventLoopGroup( - 1, Threads.newDaemonThreadFactory("hbase-client-clusterStatusListener")); + private final EventLoopGroup group = new NioEventLoopGroup(1, + new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d") + .build()); public MulticastListener() { } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index bf2f36157359..d63f2a9504e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -41,9 +41,9 @@ import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.PoolMap; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.ipc.RemoteException; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -91,10 +91,12 @@ public abstract class AbstractRpcClient<T extends RpcConnection> implements RpcC public static final Logger LOG = LoggerFactory.getLogger(AbstractRpcClient.class); protected static final HashedWheelTimer WHEEL_TIMER = new HashedWheelTimer( - Threads.newDaemonThreadFactory("RpcClient-timer"), 10, TimeUnit.MILLISECONDS); + new ThreadFactoryBuilder().setNameFormat("RpcClient-timer-pool-%d").build(), 10, + TimeUnit.MILLISECONDS); private static final ScheduledExecutorService IDLE_CONN_SWEEPER = Executors - .newScheduledThreadPool(1, Threads.newDaemonThreadFactory("Idle-Rpc-Conn-Sweeper")); + .newScheduledThreadPool(1, + new ThreadFactoryBuilder().setNameFormat("Idle-Rpc-Conn-Sweeper-pool-%d").build()); protected boolean running = true; // if client runs diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java index 7c9d9275e318..d127b95876b5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcConnection.java @@ -33,8 +33,8 @@ import org.apache.hadoop.hbase.security.NettyHBaseRpcConnectionHeaderHandler; import org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler; import org.apache.hadoop.hbase.security.SaslChallengeDecoder; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,8 +76,9 @@ class NettyRpcConnection extends RpcConnection { private static final Logger LOG =
LoggerFactory.getLogger(NettyRpcConnection.class); - private static final ScheduledExecutorService RELOGIN_EXECUTOR = - Executors.newSingleThreadScheduledExecutor(Threads.newDaemonThreadFactory("Relogin")); + private static final ScheduledExecutorService RELOGIN_EXECUTOR = Executors + .newSingleThreadScheduledExecutor( + new ThreadFactoryBuilder().setNameFormat("Relogin-pool-%d").build()); private final NettyRpcClient rpcClient; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java index 516075c462ec..13a6d398bcce 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Threads.java @@ -33,6 +33,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -197,75 +198,6 @@ public static ThreadPoolExecutor getBoundedCachedThreadPool( return boundedCachedThreadPool; } - public static ThreadPoolExecutor getBoundedCachedThreadPool(int maxCachedThread, long timeout, - TimeUnit unit, String prefix) { - return getBoundedCachedThreadPool(maxCachedThread, timeout, unit, - newDaemonThreadFactory(prefix)); - } - - /** - * Returns a {@link java.util.concurrent.ThreadFactory} that names each created thread uniquely, - * with a common prefix. - * @param prefix The prefix of every created Thread's name - * @return a {@link java.util.concurrent.ThreadFactory} that names threads - */ - public static ThreadFactory getNamedThreadFactory(final String prefix) { - SecurityManager s = System.getSecurityManager(); - final ThreadGroup threadGroup = (s != null) ? s.getThreadGroup() : Thread.currentThread() - .getThreadGroup(); - - return new ThreadFactory() { - final AtomicInteger threadNumber = new AtomicInteger(1); - private final int poolNumber = Threads.poolNumber.getAndIncrement(); - final ThreadGroup group = threadGroup; - - @Override - public Thread newThread(Runnable r) { - final String name = prefix + "-pool" + poolNumber + "-t" + threadNumber.getAndIncrement(); - return new Thread(group, r, name); - } - }; - } - - /** - * Same as {#newDaemonThreadFactory(String, UncaughtExceptionHandler)}, - * without setting the exception handler. - */ - public static ThreadFactory newDaemonThreadFactory(final String prefix) { - return newDaemonThreadFactory(prefix, null); - } - - /** - * Get a named {@link ThreadFactory} that just builds daemon threads. 
- * @param prefix name prefix for all threads created from the factory - * @param handler unhandles exception handler to set for all threads - * @return a thread factory that creates named, daemon threads with - * the supplied exception handler and normal priority - */ - public static ThreadFactory newDaemonThreadFactory(final String prefix, - final UncaughtExceptionHandler handler) { - final ThreadFactory namedFactory = getNamedThreadFactory(prefix); - return new ThreadFactory() { - @Override - public Thread newThread(Runnable r) { - Thread t = namedFactory.newThread(r); - if (handler != null) { - t.setUncaughtExceptionHandler(handler); - } else { - t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER); - } - if (!t.isDaemon()) { - t.setDaemon(true); - } - if (t.getPriority() != Thread.NORM_PRIORITY) { - t.setPriority(Thread.NORM_PRIORITY); - } - return t; - } - - }; - } - /** Sets an UncaughtExceptionHandler for the thread which logs the * Exception stack if the thread dies. */ @@ -273,7 +205,7 @@ public static void setLoggingUncaughtExceptionHandler(Thread t) { t.setUncaughtExceptionHandler(LOGGING_EXCEPTION_HANDLER); } - private static interface PrintThreadInfoHelper { + private interface PrintThreadInfoHelper { void printThreadInfo(PrintStream stream, String title); diff --git a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java index b8b321395cf6..27470d5b6cf8 100644 --- a/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java +++ b/hbase-examples/src/main/java/org/apache/hadoop/hbase/client/example/AsyncClientExample.java @@ -34,9 +34,9 @@ import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,7 +130,7 @@ public int run(String[] args) throws Exception { TableName tableName = TableName.valueOf(args[0]); int numOps = args.length > 1 ? Integer.parseInt(args[1]) : DEFAULT_NUM_OPS; ExecutorService threadPool = Executors.newFixedThreadPool(THREAD_POOL_SIZE, - Threads.newDaemonThreadFactory("AsyncClientExample")); + new ThreadFactoryBuilder().setNameFormat("AsyncClientExample-pool-%d").build()); // We use AsyncTable here so we need to provide a separated thread pool. RawAsyncTable does not // need a thread pool and may have a better performance if you use it correctly as it can save // some context switches. 
But if you use RawAsyncTable incorrectly, you may have a very bad diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java index d801ba81b601..c755d58dd111 100644 --- a/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java +++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/chaos/policies/TwoConcurrentActionPolicy.java @@ -20,8 +20,8 @@ import org.apache.hadoop.hbase.chaos.actions.Action; import org.apache.hadoop.hbase.chaos.monkies.PolicyBasedChaosMonkey; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -42,7 +42,7 @@ public TwoConcurrentActionPolicy(long sleepTime, Action[] actionsOne, Action[] a this.actionsOne = actionsOne; this.actionsTwo = actionsTwo; executor = Executors.newFixedThreadPool(2, - Threads.newDaemonThreadFactory("TwoConcurrentAction")); + new ThreadFactoryBuilder().setNameFormat("TwoConcurrentAction-pool-%d").build()); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 7d1bd4e228b4..6da95fa58a6c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -100,8 +101,8 @@ public boolean start() { // Create the thread pool that will execute RPCs threadPool = Threads.getBoundedCachedThreadPool(corePoolSize, 60L, TimeUnit.SECONDS, - Threads.newDaemonThreadFactory(this.getClass().getSimpleName(), - getUncaughtExceptionHandler())); + new ThreadFactoryBuilder().setNameFormat(this.getClass().getSimpleName() + "-pool-%d") + .setUncaughtExceptionHandler(getUncaughtExceptionHandler()).build()); return true; } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java index b668e4186c8d..40def4085e2f 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/FifoRpcScheduler.java @@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,14 +60,10 @@ public void init(Context context) { public void start() { LOG.info("Using {} as user call queue; handlerCount={}; maxQueueLength={}", this.getClass().getSimpleName(), handlerCount, 
maxQueueLength); - this.executor = new ThreadPoolExecutor( - handlerCount, - handlerCount, - 60, - TimeUnit.SECONDS, - new ArrayBlockingQueue<>(maxQueueLength), - Threads.newDaemonThreadFactory("FifoRpcScheduler.handler"), - new ThreadPoolExecutor.CallerRunsPolicy()); + this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, + new ArrayBlockingQueue<>(maxQueueLength), + new ThreadFactoryBuilder().setNameFormat("FifoRpcScheduler.handler-pool-%d").build(), + new ThreadPoolExecutor.CallerRunsPolicy()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java index b60fcece0372..67b945b788b3 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/MasterFifoRpcScheduler.java @@ -25,7 +25,7 @@ import java.util.concurrent.atomic.AtomicInteger; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; @@ -71,13 +71,14 @@ public void start() { this.getClass().getSimpleName(), handlerCount, maxQueueLength, rsReportHandlerCount, rsRsreportMaxQueueLength); this.executor = new ThreadPoolExecutor(handlerCount, handlerCount, 60, TimeUnit.SECONDS, - new ArrayBlockingQueue(maxQueueLength), - Threads.newDaemonThreadFactory("MasterFifoRpcScheduler.call.handler"), - new ThreadPoolExecutor.CallerRunsPolicy()); - this.rsReportExecutor = new ThreadPoolExecutor(rsReportHandlerCount, rsReportHandlerCount, 60, - TimeUnit.SECONDS, new ArrayBlockingQueue(rsRsreportMaxQueueLength), - Threads.newDaemonThreadFactory("MasterFifoRpcScheduler.RSReport.handler"), - new ThreadPoolExecutor.CallerRunsPolicy()); + new ArrayBlockingQueue<>(maxQueueLength), + new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.call.handler-pool-%d") + .build(), new ThreadPoolExecutor.CallerRunsPolicy()); + this.rsReportExecutor = + new ThreadPoolExecutor(rsReportHandlerCount, rsReportHandlerCount, 60, TimeUnit.SECONDS, + new ArrayBlockingQueue<>(rsRsreportMaxQueueLength), + new ThreadFactoryBuilder().setNameFormat("MasterFifoRpcScheduler.RSReport.handler-pool-%d") + .build(), new ThreadPoolExecutor.CallerRunsPolicy()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java index 825746695f2c..046d11c9e3dc 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ClusterStatusPublisher.java @@ -45,8 +45,8 @@ import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.ReflectionUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; @@ -245,8 +245,9 @@ public interface Publisher extends Closeable { 
@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static class MulticastPublisher implements Publisher { private DatagramChannel channel; - private final EventLoopGroup group = new NioEventLoopGroup( - 1, Threads.newDaemonThreadFactory("hbase-master-clusterStatusPublisher")); + private final EventLoopGroup group = new NioEventLoopGroup(1, + new ThreadFactoryBuilder().setNameFormat("hbase-master-clusterStatusPublisher-pool-%d") + .build()); public MulticastPublisher() { } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java index c9cc906f16c1..e520f65b4e4b 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/assignment/SplitTableRegionProcedure.java @@ -68,9 +68,9 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.FSUtils; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALSplitUtil; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -679,7 +679,7 @@ private Pair<Integer, Integer> splitStoreFiles(final MasterProcedureEnv env, LOG.info("pid=" + getProcId() + " splitting " + nbFiles + " storefiles, region=" + getParentRegion().getShortNameToLog() + ", threads=" + maxThreads); final ExecutorService threadPool = Executors.newFixedThreadPool(maxThreads, - Threads.newDaemonThreadFactory("StoreFileSplitter-%1$d")); + new ThreadFactoryBuilder().setNameFormat("StoreFileSplitter-pool-%d").build()); final List<Future<Pair<Path, Path>>> futures = new ArrayList<Future<Pair<Path, Path>>>(nbFiles); // Split each store file.
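One behavioral difference in this migration is easy to miss: the Threads.newDaemonThreadFactory helper deleted from Threads.java above always handed back daemon threads at normal priority with a logging uncaught-exception handler installed, whereas a bare new ThreadFactoryBuilder().setNameFormat(...).build() delegates to Executors.defaultThreadFactory() and therefore produces non-daemon threads with no handler. A minimal sketch of a closer equivalent on the shaded Guava API (class and logger names here are illustrative, not from the patch):

    import java.util.concurrent.ThreadFactory;
    import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class DaemonThreadFactorySketch {
      private static final Logger LOG = LoggerFactory.getLogger(DaemonThreadFactorySketch.class);

      // Mirrors what the deleted Threads.newDaemonThreadFactory(prefix) guaranteed:
      // daemon threads, normal priority, and a logging uncaught-exception handler.
      static ThreadFactory namedDaemonFactory(String prefix) {
        return new ThreadFactoryBuilder()
            .setNameFormat(prefix + "-pool-%d") // %d is a per-factory thread counter
            .setDaemon(true)
            .setPriority(Thread.NORM_PRIORITY)
            .setUncaughtExceptionHandler((t, e) -> LOG.warn("Thread " + t.getName() + " died", e))
            .build();
      }
    }

Pools built from such a factory do not keep the JVM alive at shutdown, matching the removed helper; the builder chains in these hunks omit setDaemon and so inherit non-daemon threads from the default factory.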
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java index 30e78eb51306..0537a5bafc38 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/DirScanPool.java @@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.conf.ConfigurationObserver; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,7 +51,8 @@ public DirScanPool(Configuration conf) { } private static ThreadPoolExecutor initializePool(int size) { - return Threads.getBoundedCachedThreadPool(size, 1, TimeUnit.MINUTES, "dir-scan"); + return Threads.getBoundedCachedThreadPool(size, 1, TimeUnit.MINUTES, + new ThreadFactoryBuilder().setNameFormat("dir-scan-pool-%d").build()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java index cb3512ae94b2..ff398a7b6b84 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/namequeues/NamedQueueRecorder.java @@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.namequeues.request.NamedQueueGetRequest; import org.apache.hadoop.hbase.namequeues.response.NamedQueueGetResponse; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -64,7 +64,8 @@ private NamedQueueRecorder(Configuration conf) { // disruptor initialization with BlockingWaitStrategy this.disruptor = new Disruptor<>(RingBufferEnvelope::new, getEventCount(eventCount), - Threads.newDaemonThreadFactory(hostingThreadName + ".slowlog.append"), + new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".slowlog.append-pool-%d") + .build(), ProducerType.MULTI, new BlockingWaitStrategy()); this.disruptor.setDefaultExceptionHandler(new DisruptorExceptionHandler()); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java index 5dd77860fa24..cd37f6f0d933 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureCoordinator.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -112,8 +112,9 @@ public static ThreadPoolExecutor defaultPool(String coordName, int opThreads) { public static ThreadPoolExecutor defaultPool(String coordName, int opThreads, long keepAliveMillis) { return new ThreadPoolExecutor(1, opThreads, 
keepAliveMillis, TimeUnit.MILLISECONDS, - new SynchronousQueue<>(), - Threads.newDaemonThreadFactory("(" + coordName + ")-proc-coordinator")); + new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat("(" + coordName + ")-proc-coordinator-pool-%d") + .build()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java index aa1a489a2ea7..cb26be2f03e2 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/ProcedureMember.java @@ -27,7 +27,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.errorhandling.ForeignException; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,8 +86,9 @@ public static ThreadPoolExecutor defaultPool(String memberName, int procThreads) public static ThreadPoolExecutor defaultPool(String memberName, int procThreads, long keepAliveMillis) { return new ThreadPoolExecutor(1, procThreads, keepAliveMillis, TimeUnit.MILLISECONDS, - new SynchronousQueue<>(), - Threads.newDaemonThreadFactory("member: '" + memberName + "' subprocedure")); + new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat("member: '" + memberName + "' subprocedure-pool-%d") + .build()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java index 31d0276ddde9..fec5039b2df0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/procedure/flush/RegionServerFlushTableProcedureManager.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.zookeeper.KeeperException; import org.apache.yetus.audience.InterfaceAudience; @@ -214,7 +215,7 @@ static class FlushTableSubprocedurePool { int threads = conf.getInt(CONCURENT_FLUSH_TASKS_KEY, DEFAULT_CONCURRENT_FLUSH_TASKS); this.name = name; executor = Threads.getBoundedCachedThreadPool(threads, keepAlive, TimeUnit.MILLISECONDS, - "rs(" + name + ")-flush-proc"); + new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-flush-proc-pool-%d").build()); taskPool = new ExecutorCompletionService<>(executor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java index a2ea802a7dfd..c0f189e83bdd 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java @@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; +import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.htrace.core.TraceScope; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -517,8 +518,9 @@ void interruptIfNecessary() { } synchronized void start(UncaughtExceptionHandler eh) { - ThreadFactory flusherThreadFactory = Threads.newDaemonThreadFactory( - server.getServerName().toShortString() + "-MemStoreFlusher", eh); + ThreadFactory flusherThreadFactory = new ThreadFactoryBuilder() + .setNameFormat(server.getServerName().toShortString() + "-MemStoreFlusher-pool-%d") + .setUncaughtExceptionHandler(eh).build(); for (int i = 0; i < flushHandlers.length; i++) { flushHandlers[i] = new FlushHandler("MemStoreFlusher." + i); flusherThreadFactory.newThread(flushHandlers[i]); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java index 75b672cf38ed..316dc092c60d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/snapshot/RegionServerSnapshotManager.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.hadoop.hbase.client.RegionReplicaUtil; @@ -283,7 +284,7 @@ static class SnapshotSubprocedurePool { int threads = conf.getInt(CONCURENT_SNAPSHOT_TASKS_KEY, DEFAULT_CONCURRENT_SNAPSHOT_TASKS); this.name = name; executor = Threads.getBoundedCachedThreadPool(threads, keepAlive, TimeUnit.MILLISECONDS, - "rs(" + name + ")-snapshot"); + new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-snapshot-pool-%d").build()); taskPool = new ExecutorCompletionService<>(executor); } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java index 63f4029b1f60..8ca6db4b35c5 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.ClassSize; import org.apache.hadoop.hbase.util.CommonFSUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.FSHLogProvider; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKeyImpl; @@ -242,9 +241,9 @@ public FSHLog(final FileSystem fs, final Path rootDir, final String logDir, // Using BlockingWaitStrategy. Stuff that is going on here takes so long it makes no sense // spinning as other strategies do. 
this.disruptor = new Disruptor<>(RingBufferTruck::new, - getPreallocatedEventCount(), - Threads.newDaemonThreadFactory(hostingThreadName + ".append"), - ProducerType.MULTI, new BlockingWaitStrategy()); + getPreallocatedEventCount(), + new ThreadFactoryBuilder().setNameFormat(hostingThreadName + ".append-pool-%d").build(), + ProducerType.MULTI, new BlockingWaitStrategy()); // Advance the ring buffer sequence so that it starts from 1 instead of 0, // because SyncFuture.NOT_DONE = 0. this.disruptor.getRingBuffer().next(); diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java index 38158e5fe563..d28dbd409f8d 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/ZKPermissionWatcher.java @@ -21,11 +21,11 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKListener; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -69,7 +69,7 @@ public ZKPermissionWatcher(ZKWatcher watcher, String aclZnodeParent = conf.get("zookeeper.znode.acl.parent", ACL_NODE); this.aclZNode = ZNodePaths.joinZNode(watcher.getZNodePaths().baseZNode, aclZnodeParent); executor = Executors.newSingleThreadExecutor( - Threads.newDaemonThreadFactory("zk-permission-watcher")); + new ThreadFactoryBuilder().setNameFormat("zk-permission-watcher-pool-%d").build()); } public void start() throws KeeperException { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java index 1bda5709182a..eb1afcdc8f4a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/snapshot/SnapshotManifest.java @@ -26,7 +26,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; import java.util.concurrent.ThreadPoolExecutor; @@ -52,6 +51,7 @@ import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSTableDescriptors; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -616,7 +616,7 @@ private ThreadPoolExecutor createExecutor(final String name) { public static ThreadPoolExecutor createExecutor(final Configuration conf, final String name) { int maxThreads = conf.getInt("hbase.snapshot.thread.pool.max", 8); return Threads.getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, - Threads.newDaemonThreadFactory(name)); + new ThreadFactoryBuilder().setNameFormat(name + "-pool-%d").build()); } /** diff --git 
a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java index 3120d8adadf4..5167826f1203 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/HFileContentValidator.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FSUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -108,7 +108,7 @@ private ExecutorService createThreadPool(Configuration conf) { int availableProcessors = Runtime.getRuntime().availableProcessors(); int numThreads = conf.getInt("hfilevalidator.numthreads", availableProcessors); return Executors.newFixedThreadPool(numThreads, - Threads.newDaemonThreadFactory("hfile-validator")); + new ThreadFactoryBuilder().setNameFormat("hfile-validator-pool-%d").build()); } @Override diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java index a75d31cddeb3..83b268362ead 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java @@ -83,6 +83,7 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.Progressable; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1657,7 +1658,7 @@ public boolean accept(Path path) { // run in multiple threads final ExecutorService tpe = Executors.newFixedThreadPool(threadPoolSize, - Threads.newDaemonThreadFactory("FSRegionQuery")); + new ThreadFactoryBuilder().setNameFormat("FSRegionQuery-pool-%d").build()); try { // ignore all file status items that are not of interest for (FileStatus regionStatus : statusList) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java index c42ec703b625..a8b069986db0 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java @@ -134,6 +134,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.apache.zookeeper.KeeperException; @@ -347,7 +348,8 @@ public HBaseFsck(Configuration conf) throws IOException, ClassNotFoundException private static ExecutorService createThreadPool(Configuration conf) { int numThreads = conf.getInt("hbasefsck.numthreads", MAX_NUM_THREADS); - return new ScheduledThreadPoolExecutor(numThreads, Threads.newDaemonThreadFactory("hbasefsck")); + return new ScheduledThreadPoolExecutor(numThreads, + new 
ThreadFactoryBuilder().setNameFormat("hbasefsck-pool-%d").build()); } /** diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java index c0189c83aae2..479818454538 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/ModifyRegionUtils.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hbase.client.RegionInfoBuilder; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.regionserver.HRegion; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -232,7 +233,7 @@ static ThreadPoolExecutor getRegionOpenAndInitThreadPool(final Configuration con "hbase.hregion.open.and.init.threads.max", 16)); ThreadPoolExecutor regionOpenAndInitThreadPool = Threads. getBoundedCachedThreadPool(maxThreads, 30L, TimeUnit.SECONDS, - Threads.newDaemonThreadFactory(threadNamePrefix)); + new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "-pool-%d").build()); return regionOpenAndInitThreadPool; } } diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java index f60721744e27..45f35eaba2e9 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/OutputSink.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +77,7 @@ public OutputSink(WALSplitter.PipelineController controller, EntryBuffers entryB this.controller = controller; this.entryBuffers = entryBuffers; this.closeThreadPool = Threads.getBoundedCachedThreadPool(numThreads, 30L, TimeUnit.SECONDS, - Threads.newDaemonThreadFactory("split-log-closeStream-")); + new ThreadFactoryBuilder().setNameFormat("split-log-closeStream-pool-%d").build()); this.closeCompletionService = new ExecutorCompletionService<>(closeThreadPool); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java index 45648062fd78..17a76de5b248 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/AcidGuaranteesTestTool.java @@ -44,9 +44,9 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,8 +90,9 @@ private ExecutorService createThreadPool() { BlockingQueue<Runnable> workQueue = new LinkedBlockingQueue<>( maxThreads *
HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS); - ThreadPoolExecutor tpe = new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, - TimeUnit.SECONDS, workQueue, Threads.newDaemonThreadFactory(toString() + "-shared")); + ThreadPoolExecutor tpe = + new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, + new ThreadFactoryBuilder().setNameFormat(toString() + "-shared-pool-%d").build()); tpe.allowCoreThreadTimeOut(true); return tpe; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java index 94adce72bb0e..a1037b7bb1f2 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableGetMultiThreaded.java @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.testclassification.LargeTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.RetryCounter; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -137,8 +137,8 @@ public void test() throws Exception { LOG.info("====== Test started ======"); int numThreads = 7; AtomicBoolean stop = new AtomicBoolean(false); - ExecutorService executor = - Executors.newFixedThreadPool(numThreads, Threads.newDaemonThreadFactory("TestAsyncGet-")); + ExecutorService executor = Executors.newFixedThreadPool(numThreads, + new ThreadFactoryBuilder().setNameFormat("TestAsyncGet-pool-%d").build()); List<Future<?>> futures = new ArrayList<>(); IntStream.range(0, numThreads).forEach(i -> futures.add(executor.submit(() -> { run(stop); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java index 5ee837af1878..93ae35b7c54d 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/coprocessor/TestOpenTableInCoprocessor.java @@ -42,8 +42,8 @@ import org.apache.hadoop.hbase.client.TableDescriptorBuilder; import org.apache.hadoop.hbase.testclassification.CoprocessorTests; import org.apache.hadoop.hbase.testclassification.MediumTests; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -102,8 +102,9 @@ private ExecutorService getPool() { int maxThreads = 1; long keepAliveTime = 60; ThreadPoolExecutor pool = - new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, - new SynchronousQueue<>(), Threads.newDaemonThreadFactory("hbase-table")); + new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS, + new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat("hbase-table-pool-%d").build()); pool.allowCoreThreadTimeOut(true); return pool; } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java index 6f12b95510eb..dbad831dcfe4 100644 ---
a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/assignment/TestRegionStates.java @@ -19,7 +19,6 @@ import static org.junit.Assert.assertEquals; -import java.lang.Thread.UncaughtExceptionHandler; import java.util.List; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorCompletionService; @@ -36,6 +35,7 @@ import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; @@ -63,13 +63,9 @@ public class TestRegionStates { @BeforeClass public static void setUp() throws Exception { threadPool = Threads.getBoundedCachedThreadPool(32, 60L, TimeUnit.SECONDS, - Threads.newDaemonThreadFactory("ProcedureDispatcher", - new UncaughtExceptionHandler() { - @Override - public void uncaughtException(Thread t, Throwable e) { - LOG.warn("Failed thread " + t.getName(), e); - } - })); + new ThreadFactoryBuilder().setNameFormat("ProcedureDispatcher-pool-%d") + .setUncaughtExceptionHandler((t, e) -> LOG.warn("Failed thread " + t.getName(), e)) + .build()); executorService = new ExecutorCompletionService(threadPool); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java index d90dbde3534a..715d81212346 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/procedure/SimpleRSProcedureManager.java @@ -24,19 +24,19 @@ import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorCompletionService; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Abortable; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.errorhandling.ForeignException; import org.apache.hadoop.hbase.errorhandling.ForeignExceptionDispatcher; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -115,19 +115,18 @@ public Subprocedure buildSubprocedure(String name, byte[] data) { } } - public class SimpleSubprocedurePool implements Closeable, Abortable { + public static class SimpleSubprocedurePool implements Closeable, Abortable { private final ExecutorCompletionService<Void> taskPool; - private final ThreadPoolExecutor executor; + private final ExecutorService executor; private volatile boolean aborted; private final List<Future<Void>> futures = new ArrayList<>(); private final String name; public SimpleSubprocedurePool(String name, Configuration conf) { this.name = name; - executor = new ThreadPoolExecutor(1, 1, 500, - TimeUnit.SECONDS, new LinkedBlockingQueue<>(), -
Threads.newDaemonThreadFactory("rs(" + name + ")-procedure")); + executor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-procedure-pool-%d").build()); taskPool = new ExecutorCompletionService<>(executor); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java index 17fb7db06543..1d3ad3002896 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestRegionServerReportForDuty.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.util.JVMClusterUtil.MasterThread; import org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread; import org.apache.hadoop.hbase.util.ManualEnvironmentEdge; -import org.apache.hadoop.hbase.util.Threads; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.log4j.Appender; import org.apache.log4j.Layout; import org.apache.log4j.PatternLayout; @@ -232,8 +232,8 @@ public void testReportForDutyWithMasterChange() throws Exception { */ @Test public void testReportForDutyWithRSRpcRetry() throws Exception { - ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = - new ScheduledThreadPoolExecutor(1, Threads.newDaemonThreadFactory("RSDelayedStart")); + ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1, + new ThreadFactoryBuilder().setNameFormat("RSDelayedStart-pool-%d").build()); // Start a master and wait for it to become the active/primary master. // Use a random unique port diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java index 717352708460..14ddc02a936f 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncFSWAL.java @@ -50,11 +50,11 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.CommonFSUtils; import org.apache.hadoop.hbase.util.FutureUtils; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WALEdit; import org.apache.hadoop.hbase.wal.WALKey; import org.apache.hadoop.hbase.wal.WALKeyImpl; import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -82,7 +82,8 @@ public class TestAsyncFSWAL extends AbstractTestFSWAL { @BeforeClass public static void setUpBeforeClass() throws Exception { - GROUP = new NioEventLoopGroup(1, Threads.newDaemonThreadFactory("TestAsyncFSWAL")); + GROUP = new NioEventLoopGroup(1, + new ThreadFactoryBuilder().setNameFormat("TestAsyncFSWAL-pool-%d").build()); CHANNEL_CLASS = NioSocketChannel.class; AbstractTestFSWAL.setUpBeforeClass(); } diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java index 0740954f900d..a7d388a98c04 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java +++ 
b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestAsyncWALReplay.java @@ -25,9 +25,9 @@ import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.testclassification.MediumTests; import org.apache.hadoop.hbase.testclassification.RegionServerTests; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.hbase.wal.WAL; import org.apache.hadoop.hbase.wal.WALFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; @@ -51,7 +51,8 @@ public class TestAsyncWALReplay extends AbstractTestWALReplay { @BeforeClass public static void setUpBeforeClass() throws Exception { - GROUP = new NioEventLoopGroup(1, Threads.newDaemonThreadFactory("TestAsyncWALReplay")); + GROUP = new NioEventLoopGroup(1, + new ThreadFactoryBuilder().setNameFormat("TestAsyncWALReplay-pool-%d").build()); CHANNEL_CLASS = NioSocketChannel.class; Configuration conf = AbstractTestWALReplay.TEST_UTIL.getConfiguration(); conf.set(WALFactory.WAL_PROVIDER, "asyncfs"); diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java index c29dee54e4c8..d66a822a1e88 100644 --- a/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/util/TestHBaseFsckMOB.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.util.hbck.HFileCorruptionChecker; import org.apache.hadoop.hbase.util.hbck.HbckTestingUtil; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -64,8 +65,9 @@ public static void setUpBeforeClass() throws Exception { conf.setInt(HConstants.HBASE_RPC_TIMEOUT_KEY, 8 * REGION_ONLINE_TIMEOUT); TEST_UTIL.startMiniCluster(1); - tableExecutorService = new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), Threads.newDaemonThreadFactory("testhbck")); + tableExecutorService = + new ThreadPoolExecutor(1, POOL_SIZE, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new ThreadFactoryBuilder().setNameFormat("testhbck-pool-%d").build()); hbfsckExecutorService = new ScheduledThreadPoolExecutor(POOL_SIZE); diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java index 9162ff821fd1..237834e50f71 100644 --- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java +++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift/IncrementCoalescer.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.thrift.generated.TIncrement; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -150,9 +150,8 @@ public boolean equals(Object obj) { public IncrementCoalescer(ThriftHBaseServiceHandler hand) { this.handler = hand; LinkedBlockingQueue queue = new LinkedBlockingQueue<>(); - pool = new 
ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, - TimeUnit.MILLISECONDS, queue, - Threads.newDaemonThreadFactory("IncrementCoalescer")); + pool = new ThreadPoolExecutor(CORE_POOL_SIZE, CORE_POOL_SIZE, 50, TimeUnit.MILLISECONDS, queue, + new ThreadFactoryBuilder().setNameFormat("IncrementCoalescer-pool-%d").build()); MBeans.register("thrift", "Thrift", this); } diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java index fca8738778a4..bee99d26cf86 100644 --- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java +++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKWatcher.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.security.Superusers; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hadoop.hbase.util.Threads; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.WatchedEvent; @@ -95,8 +95,8 @@ public class ZKWatcher implements Watcher, Abortable, Closeable { // and further prevents deadlocks if the process method itself makes other zookeeper calls. // It is ok to do it in a single thread because the Zookeeper ClientCnxn already serializes the // requests using a single while loop and hence there is no performance degradation. - private final ExecutorService zkEventProcessor = - Executors.newSingleThreadExecutor(Threads.getNamedThreadFactory("zk-event-processor")); + private final ExecutorService zkEventProcessor = Executors.newSingleThreadExecutor( + new ThreadFactoryBuilder().setNameFormat("zk-event-processor-pool-%d").build()); private final Configuration conf;
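Every call site in this patch follows the same shape: build the named factory once, hand it to the executor, and, in the subprocedure pools, wrap the executor in an ExecutorCompletionService. A compact usage sketch of that shape, with hypothetical names and the daemon flag written out explicitly (the hunks above rely on the builder's defaults):

    import java.util.concurrent.ExecutorCompletionService;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

    class BackupPoolSketch {
      // Same constructor shape as LogRollBackupSubprocedurePool above; threads are
      // named rs(<name>)-backup-pool-0, rs(<name>)-backup-pool-1, and so on.
      static ExecutorCompletionService<Void> newTaskPool(String name, int threads, long keepAliveSec) {
        ThreadPoolExecutor executor = new ThreadPoolExecutor(1, threads, keepAliveSec,
            TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
            new ThreadFactoryBuilder().setNameFormat("rs(" + name + ")-backup-pool-%d")
                .setDaemon(true) // assumption: carries over the old helper's daemon semantics
                .build());
        return new ExecutorCompletionService<>(executor);
      }
    }

Submitting work through the completion service and then draining it with take().get() is the usual pattern these pools use to await all outstanding subprocedure tasks.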