diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b9f8e07f67a5f..bbff1e06e02ff 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -975,6 +975,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
public static final String DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT =
"dfs.datanode.http.internal-proxy.port";
+ public static final String DFS_DATANODE_NETTY_WORKER_NUM_THREADS_KEY =
+ "dfs.datanode.netty.worker.threads";
+ public static final int DFS_DATANODE_NETTY_WORKER_NUM_THREADS_DEFAULT = 0;
public static final String DFS_DATANODE_MAX_RECEIVER_THREADS_KEY =
HdfsClientConfigKeys.DeprecatedKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY;
public static final int DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
index bb2757287fbe0..647786f5baa4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java
@@ -65,12 +65,15 @@
import java.security.GeneralSecurityException;
import java.util.Enumeration;
import java.util.Map;
+import java.util.concurrent.Executors;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETTY_WORKER_NUM_THREADS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_NETTY_WORKER_NUM_THREADS_DEFAULT;
/**
* Data node HTTP Server Class.
@@ -144,7 +147,15 @@ public DatanodeHttpServer(final Configuration conf,
confForCreate.set(FsPermission.UMASK_LABEL, "000");
this.bossGroup = new NioEventLoopGroup();
- this.workerGroup = new NioEventLoopGroup();
+ int workerCount = conf.getInt(DFS_DATANODE_NETTY_WORKER_NUM_THREADS_KEY,
+ DFS_DATANODE_NETTY_WORKER_NUM_THREADS_DEFAULT);
+ if (workerCount < 0) {
+ LOG.warn("The value of {} is less than 0, will use default value: {}",
+ DFS_DATANODE_NETTY_WORKER_NUM_THREADS_KEY, DFS_DATANODE_NETTY_WORKER_NUM_THREADS_DEFAULT);
+ workerCount = DFS_DATANODE_NETTY_WORKER_NUM_THREADS_DEFAULT;
+ }
+ this.workerGroup = new NioEventLoopGroup(workerCount, Executors.newCachedThreadPool());
+
this.externalHttpChannel = externalHttpChannel;
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
final ChannelHandler[] handlers = getFilterHandlers(conf);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 94c3ea0cc9b0c..da52c9e514619 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -154,6 +154,14 @@
+<property>
+  <name>dfs.datanode.netty.worker.threads</name>
+  <value>0</value>
+  <description>
+    The number of Datanode http server Netty worker threads. A value of 0
+    (the default) or a negative value means Netty chooses its default.
+  </description>
+</property>
   <name>dfs.datanode.handler.count</name>
   <value>10</value>