diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index c2b03cdaad79c..20e6df426c42d 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -2435,6 +2435,20 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { } } + /** + * Requests the namenode to refresh protected directories from config. + * See {@link ClientProtocol#refreshProtectedDirectories()} + * for more details. + * + * @see ClientProtocol#refreshProtectedDirectories() + */ + public void refreshProtectedDirectories() throws IOException { + checkOpen(); + try (TraceScope ignored = tracer.newScope("refreshProtectedDirectories")) { + namenode.refreshProtectedDirectories(); + } + } + /** * @see ClientProtocol#finalizeUpgrade() */ diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index fe2d077977ca2..6c85b33e679bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -2012,6 +2012,15 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { dfs.setBalancerBandwidth(bandwidth); } + /** + * Requests the namenode to refresh protected directories from config. + * + * @throws IOException + */ + public void refreshProtectedDirectories() throws IOException { + dfs.refreshProtectedDirectories(); + } + /** * Get a canonical service name for this file system. If the URI is logical, * the hostname part of the URI will be returned. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java index ea90645ca082b..4a74c0dae0a9c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java @@ -1041,6 +1041,14 @@ CorruptFileBlocks listCorruptFileBlocks(String path, String cookie) @Idempotent void setBalancerBandwidth(long bandwidth) throws IOException; + /** + * Tell namenode to refresh protected directories from config. + * + * @throws IOException If an I/O error occurred. + */ + @Idempotent + void refreshProtectedDirectories() throws IOException; + /** * Get the file info for a specific file or directory. 
* @param src The string representation of the path to the file diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index 0886636419b67..fed32572c851c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -173,6 +173,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.OpenFilesBatchResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCacheDirectiveRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; @@ -289,6 +290,10 @@ public class ClientNamenodeProtocolTranslatorPB implements private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST = RefreshNodesRequestProto.newBuilder().build(); + private final static RefreshProtectedDirectoriesRequestProto + VOID_REFRESH_PROTECTED_DIR_REQUEST = + RefreshProtectedDirectoriesRequestProto.newBuilder().build(); + private final static FinalizeUpgradeRequestProto VOID_FINALIZE_UPGRADE_REQUEST = FinalizeUpgradeRequestProto.newBuilder().build(); @@ -1185,6 +1190,16 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { } } + @Override + public void refreshProtectedDirectories() throws IOException { + try { + rpcProxy.refreshProtectedDirectories(null, + VOID_REFRESH_PROTECTED_DIR_REQUEST); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } + @Override public boolean isMethodSupported(String methodName) throws IOException { return RpcClientUtil.isMethodSupported(rpcProxy, diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto index 20967cc13ab86..6fc189a135b8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto @@ -761,6 +761,12 @@ message SetBalancerBandwidthResponseProto { // void response message GetDataEncryptionKeyRequestProto { // no parameters } +message RefreshProtectedDirectoriesRequestProto { // no parameters +} + +message RefreshProtectedDirectoriesResponseProto { // void response +} + message GetDataEncryptionKeyResponseProto { optional DataEncryptionKeyProto dataEncryptionKey = 1; } @@ -982,6 +988,8 @@ service ClientNamenodeProtocol { returns(hadoop.common.CancelDelegationTokenResponseProto); rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto) returns(SetBalancerBandwidthResponseProto); + rpc refreshProtectedDirectories(RefreshProtectedDirectoriesRequestProto) + returns(RefreshProtectedDirectoriesResponseProto); rpc 
getDataEncryptionKey(GetDataEncryptionKeyRequestProto) returns(GetDataEncryptionKeyResponseProto); rpc createSnapshot(CreateSnapshotRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java index 2a3bd735885e3..6975ebbc19bdb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterClientProtocol.java @@ -1188,6 +1188,14 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { rpcClient.invokeConcurrent(nss, method, true, false); } + @Override + public void refreshProtectedDirectories() throws IOException { + rpcServer.checkOperation(NameNode.OperationCategory.UNCHECKED); + RemoteMethod method = new RemoteMethod("refreshProtectedDirectories"); + final Set nss = namenodeResolver.getNamespaces(); + rpcClient.invokeConcurrent(nss, method, true, true); + } + @Override public ContentSummary getContentSummary(String path) throws IOException { rpcServer.checkOperation(NameNode.OperationCategory.READ); diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 1d0800e4bd833..928b231b9b7ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1211,6 +1211,11 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { clientProto.setBalancerBandwidth(bandwidth); } + @Override // ClientProtocol + public void refreshProtectedDirectories() throws IOException { + clientProto.refreshProtectedDirectories(); + } + @Override // ClientProtocol public ContentSummary getContentSummary(String path) throws IOException { return clientProto.getContentSummary(path); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 5132afaa4b15c..52775b7499ae4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -25,6 +25,8 @@ import java.util.Map; import java.util.stream.Collectors; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshProtectedDirectoriesResponseProto; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.ProtocolStringList; import org.apache.hadoop.classification.InterfaceAudience; @@ -445,6 +447,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements VOID_SATISFYSTORAGEPOLICY_RESPONSE = SatisfyStoragePolicyResponseProto 
.getDefaultInstance(); + private static final RefreshProtectedDirectoriesResponseProto + VOID_REFRESHPROTECTEDDIRECTORIES_RESPONSE = + RefreshProtectedDirectoriesResponseProto.newBuilder().build(); + /** * Constructor * @@ -1258,6 +1264,18 @@ public SetBalancerBandwidthResponseProto setBalancerBandwidth( } } + @Override + public RefreshProtectedDirectoriesResponseProto refreshProtectedDirectories( + RpcController controller, RefreshProtectedDirectoriesRequestProto req) + throws ServiceException { + try { + server.refreshProtectedDirectories(); + return VOID_REFRESHPROTECTEDDIRECTORIES_RESPONSE; + } catch (IOException e) { + throw new ServiceException(e); + } + } + @Override public GetDataEncryptionKeyResponseProto getDataEncryptionKey( RpcController controller, GetDataEncryptionKeyRequestProto request) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 0e15921ba4953..b64f3bd2cc9f8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -18,10 +18,9 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; -import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.hdfs.util.ProtectedDirsConfigReader; import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; @@ -525,23 +524,9 @@ public boolean isImageLoaded() { */ @VisibleForTesting static SortedSet parseProtectedDirectories(Configuration conf) { - return parseProtectedDirectories(conf - .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES)); - } - - /** - * Parse configuration setting dfs.namenode.protected.directories to retrieve - * the set of protected directories. - * - * @param protectedDirsString - * a comma separated String representing a bunch of paths. - * @return a TreeSet - */ - @VisibleForTesting - static SortedSet parseProtectedDirectories( - final String protectedDirsString) { - return parseProtectedDirectories(StringUtils - .getTrimmedStringCollection(protectedDirsString)); + return parseProtectedDirectories( + ProtectedDirsConfigReader.parseProtectedDirsFromConfig( + conf.getTrimmed(FS_PROTECTED_DIRECTORIES))); } private static SortedSet parseProtectedDirectories( @@ -560,22 +545,16 @@ public boolean isProtectedSubDirectoriesEnable() { } /** - * Set directories that cannot be removed unless empty, even by an + * Refresh directories that cannot be removed unless empty, even by an * administrator. 
* - * @param protectedDirsString - * comma separated list of protected directories */ - String setProtectedDirectories(String protectedDirsString) { - if (protectedDirsString == null) { - protectedDirectories = new TreeSet<>(); - } else { - protectedDirectories = parseProtectedDirectories(protectedDirsString); - } - - return Joiner.on(",").skipNulls().join(protectedDirectories); + void refreshProtectedDirectories(Configuration newConf) { + LOG.info("Refresh protected directories from config file"); + protectedDirectories = parseProtectedDirectories(newConf); } + BlockManager getBlockManager() { return getFSNamesystem().getBlockManager(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index e98a59d7283e9..40179af25b3bc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4981,6 +4981,14 @@ void setBalancerBandwidth(long bandwidth) throws IOException { logAuditEvent(true, operationName, null); } + void refreshProtectedDirectories() throws IOException { + String operationName = "refreshProtectedDirs"; + checkOperation(OperationCategory.UNCHECKED); + checkSuperuserPrivilege(operationName); + getFSDirectory().refreshProtectedDirectories(new HdfsConfiguration()); + logAuditEvent(true, operationName, null); + } + boolean setSafeMode(SafeModeAction action) throws IOException { String operationName = action.toString().toLowerCase(); boolean error = false; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 54671ea6cf4ae..f2362e253fa49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -171,7 +171,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT; @@ -320,7 +319,6 @@ public enum OperationCategory { .newTreeSet(Lists.newArrayList( DFS_HEARTBEAT_INTERVAL_KEY, DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, - FS_PROTECTED_DIRECTORIES, HADOOP_CALLER_CONTEXT_ENABLED_KEY, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY, DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, @@ -2170,8 +2168,6 @@ protected String reconfigurePropertyImpl(String property, String newVal) return reconfHeartbeatInterval(datanodeManager, property, newVal); } else if (property.equals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY)) { return reconfHeartbeatRecheckInterval(datanodeManager, property, newVal); - } else if (property.equals(FS_PROTECTED_DIRECTORIES)) { - return 
reconfProtectedDirectories(newVal); } else if (property.equals(HADOOP_CALLER_CONTEXT_ENABLED_KEY)) { return reconfCallerContextEnabled(newVal); } else if (property.equals(ipcClientRPCBackoffEnable)) { @@ -2296,9 +2292,6 @@ private String reconfHeartbeatRecheckInterval( } } - private String reconfProtectedDirectories(String newVal) { - return getNamesystem().getFSDirectory().setProtectedDirectories(newVal); - } private String reconfCallerContextEnabled(String newVal) { Boolean callerContextEnabled; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 70b12b3e198ff..e2df89f76a3fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -1453,7 +1453,19 @@ public void setBalancerBandwidth(long bandwidth) throws IOException { checkNNStartup(); namesystem.setBalancerBandwidth(bandwidth); } - + + /** + * Tell namenode to refresh protected directories from config. + * + * @throws IOException If an I/O error occurred. + */ + @Override // ClientProtocol + public void refreshProtectedDirectories() throws IOException { + checkNNStartup(); + namesystem.refreshProtectedDirectories(); + } + + @Override // ClientProtocol public ContentSummary getContentSummary(String path) throws IOException { checkNNStartup(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index f8ff5fe99392a..d0b57785ebe52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -458,6 +458,7 @@ static int run(DistributedFileSystem dfs, String[] argv, int idx) throws IOExcep "\t[-deleteBlockPool datanode_host:ipc_port blockpoolId [force]]\n"+ "\t[-setBalancerBandwidth ]\n" + "\t[-getBalancerBandwidth ]\n" + + "\t[-refreshProtectedDirectories]\n" + "\t[-fetchImage ]\n" + "\t[-allowSnapshot ]\n" + "\t[-disallowSnapshot ]\n" + @@ -1091,6 +1092,48 @@ public int getBalancerBandwidth(String[] argv, int idx) throws IOException { return 0; } + /** + * Command to ask the namenode to set protected directories + * Usage: hdfs dfsadmin -refreshProtectedDirectories. 
+ * @throws IOException If an I/O error occurred. + */ + public int refreshProtectedDirectories() throws IOException { + int exitCode = -1; + + DistributedFileSystem dfs = getDFS(); + Configuration dfsConf = dfs.getConf(); + URI dfsUri = dfs.getUri(); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); + + if (isHaEnabled) { + String nsId = dfsUri.getHost(); + List<ProxyAndInfo<ClientProtocol>> proxies = + HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, + nsId, ClientProtocol.class); + List<IOException> exceptions = new ArrayList<>(); + for (ProxyAndInfo<ClientProtocol> proxy : proxies) { + try { + proxy.getProxy().refreshProtectedDirectories(); + System.out.println("Refresh protected directories successful for " + + proxy.getAddress()); + } catch (IOException ioe) { + System.out.println("Refresh protected directories failed for " + + proxy.getAddress()); + exceptions.add(ioe); + } + } + if (!exceptions.isEmpty()) { + throw MultipleIOException.createIOException(exceptions); + } + } else { + dfs.refreshProtectedDirectories(); + System.out.println("Refresh protected directories successful"); + } + exitCode = 0; + + return exitCode; + } + /** * Download the most recent fsimage from the name node, and save it to a local * file in the given directory. @@ -1256,6 +1299,9 @@ private void printHelp(String cmd) { "\tduring HDFS block balancing.\n\n" + "\t--- NOTE: This value is not persistent on the DataNode.---\n"; + String refreshProtectedDirectories = "-refreshProtectedDirectories:\n" + + "\tRefresh protected directories from config.\n"; + String fetchImage = "-fetchImage <local directory>:\n" + "\tDownloads the most recent fsimage from the Name Node and saves it in" + "\tthe specified local directory.\n"; @@ -1353,6 +1399,8 @@ private void printHelp(String cmd) { System.out.println(setBalancerBandwidth); } else if ("getBalancerBandwidth".equals(cmd)) { System.out.println(getBalancerBandwidth); + } else if ("refreshProtectedDirectories".equals(cmd)) { + System.out.println(refreshProtectedDirectories); } else if ("fetchImage".equals(cmd)) { System.out.println(fetchImage); } else if ("allowSnapshot".equalsIgnoreCase(cmd)) { @@ -1400,6 +1448,7 @@ private void printHelp(String cmd) { System.out.println(deleteBlockPool); System.out.println(setBalancerBandwidth); System.out.println(getBalancerBandwidth); + System.out.println(refreshProtectedDirectories); System.out.println(fetchImage); System.out.println(allowSnapshot); System.out.println(disallowSnapshot); @@ -2189,6 +2238,9 @@ private static void printUsage(String cmd) { } else if ("-getBalancerBandwidth".equalsIgnoreCase(cmd)) { System.err.println("Usage: hdfs dfsadmin" + " [-getBalancerBandwidth <datanode_host:ipc_port>]"); + } else if ("-refreshProtectedDirectories".equalsIgnoreCase(cmd)) { + System.err.println("Usage: hdfs dfsadmin" + + " [-refreshProtectedDirectories]"); } else if ("-fetchImage".equals(cmd)) { System.err.println("Usage: hdfs dfsadmin" + " [-fetchImage <local directory>]"); @@ -2349,6 +2401,11 @@ public int run(String[] argv) { printUsage(cmd); return exitCode; } + } else if ("-refreshProtectedDirectories".equalsIgnoreCase(cmd)) { + if (argv.length != 1) { + printUsage(cmd); + return exitCode; + } } else if ("-fetchImage".equals(cmd)) { if (argv.length != 2) { printUsage(cmd); @@ -2438,6 +2495,8 @@ public int run(String[] argv) { exitCode = setBalancerBandwidth(argv, i); } else if ("-getBalancerBandwidth".equals(cmd)) { exitCode = getBalancerBandwidth(argv, i); + } else if ("-refreshProtectedDirectories".equals(cmd)) { + exitCode = refreshProtectedDirectories(); } else if ("-fetchImage".equals(cmd)) { exitCode = fetchImage(argv, i); } else if
("-shutdownDatanode".equals(cmd)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ProtectedDirsConfigReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ProtectedDirsConfigReader.java new file mode 100644 index 0000000000000..9a402fb3fb6d6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ProtectedDirsConfigReader.java @@ -0,0 +1,138 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.util; + +import java.io.*; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.util.Collections; +import java.util.Set; +import java.util.HashSet; + +import org.apache.commons.logging.LogFactory; +import org.apache.commons.logging.Log; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.util.StringUtils; + +@InterfaceAudience.Private +public class ProtectedDirsConfigReader { + + private static final Log LOG = LogFactory + .getLog(ProtectedDirsConfigReader.class); + + private Set currentDirectories; + + public ProtectedDirsConfigReader(String configFile) + throws IOException { + currentDirectories = new HashSet<>(); + loadConfig(configFile); + } + + private void readFileToSet(String filename, + Set set) throws IOException { + URI uri = URI.create(filename); + File file = uri.isAbsolute() ? new File(uri) : new File(filename); + InputStream fis = Files.newInputStream(file.toPath()); + readFileToSetWithFileInputStream(filename, fis, set); + } + + private void readFileToSetWithFileInputStream(String filename, + InputStream fileInputStream, Set set) + throws IOException { + BufferedReader reader = null; + try { + reader = new BufferedReader( + new InputStreamReader(fileInputStream, StandardCharsets.UTF_8)); + String line; + while ((line = reader.readLine()) != null) { + String[] paths = line.split("[ \t\n\f\r]+"); + if (paths != null) { + for (int i = 0; i < paths.length; i++) { + paths[i] = paths[i].trim(); + if (paths[i].startsWith("#")) { + // Everything from now on is a comment + break; + } + if (!paths[i].isEmpty()) { + LOG.info("Adding " + paths[i] + " to the list of " + + " protected directories from " + filename); + set.add(paths[i]); + } + } + } + } + } finally { + if (reader != null) { + reader.close(); + } + fileInputStream.close(); + } + } + + private synchronized void loadConfig(String configFile) + throws IOException { + LOG.info("Loading protected directories"); + Set newDirs = new HashSet(); + + if (!configFile.isEmpty()) { + readFileToSet(configFile, newDirs); + currentDirectories = Collections.unmodifiableSet(newDirs); + } + } + + /** + * to get protected directories. 
+ * + * @return currentDirectories + */ + public synchronized Set<String> getProtectedDirectories() { + return currentDirectories; + } + + public static Set<String> parseProtectedDirsFromConfig( + String protectedDirsString) { + if (protectedDirsString == null) { + return new HashSet<>(); + } + + Set<String> dirs = new HashSet<>(); + for (String pathStr : + StringUtils.getTrimmedStringCollection(protectedDirsString)) { + if (!pathStr.startsWith("file://")) { + dirs.add(pathStr); + } else { + try { + ProtectedDirsConfigReader reader = + new ProtectedDirsConfigReader(pathStr); + dirs.addAll(reader.getProtectedDirectories()); + } catch (NoSuchFileException ex) { + LOG.warn("The protected directories config file is not found in " + + pathStr); + } catch (IOException ex) { + LOG.error( + "Error in parseProtectedDirsFromConfig", + ex); + } + } + } + return dirs; + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java index ea68ee705bafb..63251231cf48a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java @@ -18,6 +18,11 @@ package org.apache.hadoop.hdfs.server.namenode; +import java.io.File; +import java.io.FileWriter; +import java.net.URI; +import java.util.stream.Collectors; +import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -31,7 +36,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.hdfs.server.namenode.FSDirectory; +import org.junit.After; import org.junit.Rule; import org.junit.Test; import org.junit.rules.Timeout; @@ -56,9 +61,25 @@ public class TestProtectedDirectories { static final Logger LOG = LoggerFactory.getLogger( TestProtectedDirectories.class); + // Using /test/build/data/tmp directory to store temporary files + private final String pathTestDir = GenericTestUtils.getTestDir() + .getAbsolutePath(); + + private String configFile = "file://" + pathTestDir + "/protected.dir.config"; + private String newConfigFile = configFile + "_new"; + + @Rule public Timeout timeout = new Timeout(300000); + + @After + public void tearDown() throws Exception { + // Delete test files after running tests + new File(URI.create(configFile)).delete(); + new File(URI.create(newConfigFile)).delete(); + } + /** * Start a namenode-only 'cluster' which is configured to protect * the given list of directories. @@ -73,10 +94,65 @@ public MiniDFSCluster setupTestCase(Configuration conf, Collection<Path> unProtectedDirs) throws Throwable { // Initialize the configuration. - conf.set( - CommonConfigurationKeys.FS_PROTECTED_DIRECTORIES, + conf.set(FS_PROTECTED_DIRECTORIES, Joiner.on(",").skipNulls().join(protectedDirs)); + return setupTestCluster(conf, protectedDirs, unProtectedDirs); + } + + /** + * Start a namenode-only 'cluster' which is configured to protect + * the given list of directories, + * some of which are supplied through a protected-directories config file.
+ * @param conf + * @param protectedDirsConfigFile + * @param protectedDirsPaths + * @param protectedDirsPathsInFile + * @param unProtectedDirs + * @return + * @throws IOException + */ + public MiniDFSCluster setupTestMixtureConfigureCase(Configuration conf, + String protectedDirsConfigFile, + Collection protectedDirsPaths, + Collection protectedDirsPathsInFile, + Collection unProtectedDirs) + throws Throwable { + + // Store protectedDirsInFile to configFile + storeProtectedDirs2Config(protectedDirsConfigFile, + protectedDirsPathsInFile); + + // Initialize the configuration. + List confStrList = new ArrayList<>(protectedDirsPaths); + if (protectedDirsConfigFile != null) { + confStrList.add(protectedDirsConfigFile); + } + conf.set( + FS_PROTECTED_DIRECTORIES, + Joiner.on(",").skipNulls().join(confStrList)); + + return setupTestCluster(conf, + protectedDirsPaths.stream().map(Path::new).collect( + Collectors.toList()), + unProtectedDirs.stream().map(Path::new).collect( + Collectors.toList())); + } + + /** + * Start a namenode-only 'cluster' which is configured to protect + * the given list of directories. + * @param conf + * @param protectedDirs + * @param unProtectedDirs + * @return + * @throws IOException + */ + public MiniDFSCluster setupTestCluster(Configuration conf, + Collection protectedDirs, + Collection unProtectedDirs) + throws Throwable { + // Start the cluster. MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); @@ -94,7 +170,6 @@ public MiniDFSCluster setupTestCase(Configuration conf, throw t; } } - /** * Initialize a collection of file system layouts that will be used * as the test matrix. @@ -234,26 +309,24 @@ public void testReconfigureProtectedPaths() throws Throwable { NameNode nn = cluster.getNameNode(); + FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory(); + // change properties - nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew); + conf.setStrings(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew); + fsDirectory.refreshProtectedDirectories(conf); - FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory(); // verify change assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), protectedPathsNew, fsDirectory.getProtectedDirectories()); - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES)); - // revert to default - nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null); + conf.unset(FS_PROTECTED_DIRECTORIES); + fsDirectory.refreshProtectedDirectories(conf); // verify default assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), new TreeSet(), fsDirectory.getProtectedDirectories()); - assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), - null, nn.getConf().get(FS_PROTECTED_DIRECTORIES)); } @Test @@ -502,6 +575,94 @@ public void testBadPathsInConfig() { paths.size(), is(0)); } + + @Test + public void testMixtureConfigureProtectedPaths() throws Throwable { + Configuration conf = new HdfsConfiguration(); + + Collection protectedPaths = Arrays.asList("/a", "/b", "/c"); + + Collection protectedPathsInFile = Arrays.asList("/d", "/e", "/f"); + + MiniDFSCluster cluster = setupTestMixtureConfigureCase(conf, + configFile, + protectedPaths, + protectedPathsInFile, + Collections.EMPTY_LIST); + + NameNode nn = cluster.getNameNode(); + + FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory(); + + TreeSet allProtectedPathSet = new TreeSet<>(protectedPaths); + + 
allProtectedPathSet.addAll(protectedPathsInFile); + + // verify + assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), + allProtectedPathSet, + fsDirectory.getProtectedDirectories()); + + } + + @Test + public void testReconfigureProtectedPathsFromFile() throws Throwable { + Configuration conf = new HdfsConfiguration(); + + Collection protectedPaths = Arrays.asList("/a", "/b", "/c"); + + Collection protectedPathsInFile = Arrays.asList("/d", "/e", "/f"); + + MiniDFSCluster cluster = setupTestMixtureConfigureCase(conf, + configFile, + protectedPaths, + protectedPathsInFile, + Collections.EMPTY_LIST); + + NameNode nn = cluster.getNameNode(); + + FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory(); + + SortedSet protectedPathsNew = new TreeSet<>( + FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"), + FS_PROTECTED_DIRECTORIES)); + + // update protectedPathsNew to th config file + storeProtectedDirs2Config(configFile, protectedPathsNew); + + // refresh the content of the configuration file + fsDirectory.refreshProtectedDirectories(conf); + + TreeSet allProtectedPathSet = new TreeSet<>(protectedPaths); + allProtectedPathSet.addAll(protectedPathsNew); + + // verify change + assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), + allProtectedPathSet, fsDirectory.getProtectedDirectories()); + + // prepare protectedPaths in newConfigFile + protectedPathsNew = new TreeSet<>( + FSDirectory.normalizePaths(Arrays.asList("/dd", "/ee", "/ff"), + FS_PROTECTED_DIRECTORIES)); + storeProtectedDirs2Config(newConfigFile, protectedPathsNew); + + // set fs.protected.directories to file:///path/to/newConfigFile + conf.set(FS_PROTECTED_DIRECTORIES, newConfigFile); + fsDirectory.refreshProtectedDirectories(conf); + + // verify change + assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), + protectedPathsNew, fsDirectory.getProtectedDirectories()); + + // revert to default + conf.unset(FS_PROTECTED_DIRECTORIES); + fsDirectory.refreshProtectedDirectories(conf); + + // verify + assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES), + new TreeSet(), fsDirectory.getProtectedDirectories()); + } + /** * Return true if the path was successfully deleted. False if it * failed with AccessControlException. Any other exceptions are @@ -555,6 +716,22 @@ private boolean renamePath(FileSystem fs, Path srcPath, Path dstPath) } } + /** + * store protectedPaths to the configuration file . + * @param file + * @param protectedPaths + * @throws IOException + */ + private void storeProtectedDirs2Config(String file, + Collection protectedPaths) + throws IOException { + try (FileWriter ifw = new FileWriter(new File(URI.create(file)))) { + for (String dir : protectedPaths) { + ifw.write(dir + "\n"); + } + } + } + private static class TestMatrixEntry { // true if the path can be deleted. 
final Map protectedPaths = Maps.newHashMap(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java index 736d66f2f4a78..5963f505e499c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java @@ -430,7 +430,7 @@ public void testNameNodeGetReconfigurableProperties() throws IOException { final List outs = Lists.newArrayList(); final List errs = Lists.newArrayList(); getReconfigurableProperties("namenode", address, outs, errs); - assertEquals(13, outs.size()); + assertEquals(12, outs.size()); assertEquals(DFS_BLOCK_PLACEMENT_EC_CLASSNAME_KEY, outs.get(1)); assertEquals(DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, outs.get(2)); assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(3)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestProtectedDirsConfigReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestProtectedDirsConfigReader.java new file mode 100644 index 0000000000000..4a58beccc5841 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestProtectedDirsConfigReader.java @@ -0,0 +1,187 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.util; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.File; +import java.io.FileWriter; +import java.nio.file.NoSuchFileException; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +/** + * Test for ProtectedDirsConfigReader. + */ +public class TestProtectedDirsConfigReader { + + // Using /test/build/data/tmp directory to store temprory files. + private final String pathTestDir = GenericTestUtils + .getTestDir().getAbsolutePath(); + + private String configFile = pathTestDir + "/protected.dir.config"; + + + @After + public void tearDown() throws Exception { + // Delete test files after running tests + new File(configFile).delete(); + } + + /* + * 1.Create protected.dirs.config file + * 2.Write path names per line + * 3.Write comments starting with # + * 4.Close file + * 5.Compare if number of paths reported by ProtectedDirConfigFileReader + * are equal to the number of paths written + */ + @Test + public void testProtectedDirConfigFileReader() throws Exception { + + FileWriter cfw = new FileWriter(configFile); + + cfw.write("#PROTECTED-DIRS-LIST\n"); + cfw.write("/dira1/dira2\n"); + cfw.write("/dirb1/dirb2/dirb3\n"); + cfw.write("/dirc1/dirc2/dirc3\n"); + cfw.write("#This-is-comment\n"); + cfw.write("/dird1/dird2 # /diri1/diri2\n"); + cfw.write("/dird1/dird2 /dire1/dire2\n"); + cfw.close(); + + ProtectedDirsConfigReader hfp = new ProtectedDirsConfigReader(configFile); + + int dirsLen = hfp.getProtectedDirectories().size(); + + assertEquals(5, dirsLen); + assertTrue(hfp.getProtectedDirectories().contains("/dire1/dire2")); + assertFalse(hfp.getProtectedDirectories().contains("/dirh1/dirh2")); + + } + + /* + * Test creating a new ProtectedDirConfigFileReader with nonexistent files + */ + @Test + public void testCreateReaderWithNonexistentFile() throws Exception { + try { + new ProtectedDirsConfigReader( + pathTestDir + "/doesnt-exist"); + Assert.fail("Should throw FileNotFoundException"); + } catch (NoSuchFileException ex) { + // Exception as expected + } + } + + + /* + * Test for null file + */ + @Test + public void testProtectedDirConfigFileReaderWithNull() throws Exception { + FileWriter cfw = new FileWriter(configFile); + + cfw.close(); + + ProtectedDirsConfigReader hfp = new ProtectedDirsConfigReader(configFile); + + int dirsLen = hfp.getProtectedDirectories().size(); + + // TestCase1: Check if lines beginning with # are ignored + assertEquals(0, dirsLen); + + // TestCase2: Check if given path names are reported + // by getProtectedProtectedDirs. 
+ assertFalse(hfp.getProtectedDirectories().contains("/dire1/dire2")); + } + + /* + * Check if only comments can be written to paths file + */ + @Test + public void testProtectedDirConfigFileReaderWithCommentsOnly() + throws Exception { + FileWriter cfw = new FileWriter(configFile); + + cfw.write("#PROTECTED-DIRS-LIST\n"); + cfw.write("#This-is-comment\n"); + + cfw.close(); + + ProtectedDirsConfigReader hfp = new ProtectedDirsConfigReader(configFile); + + int dirsLen = hfp.getProtectedDirectories().size(); + + assertEquals(0, dirsLen); + assertFalse(hfp.getProtectedDirectories().contains("/dire1/dire2")); + + } + + /* + * Test if spaces are allowed in path names + */ + @Test + public void testProtectedDirConfigFileReaderWithSpaces() throws Exception { + + FileWriter cfw = new FileWriter(configFile); + + cfw.write("#PROTECTED-DIRS-LIST\n"); + cfw.write(" somepath /dirb1/dirb2/dirb3"); + cfw.write(" /dirc1/dirc2/dirc3 # /dird1/dird2"); + cfw.close(); + + ProtectedDirsConfigReader hfp = new ProtectedDirsConfigReader(configFile); + + int dirsLen = hfp.getProtectedDirectories().size(); + + assertEquals(3, dirsLen); + assertTrue(hfp.getProtectedDirectories().contains("/dirc1/dirc2/dirc3")); + assertFalse(hfp.getProtectedDirectories().contains("/dire1/dire2")); + assertFalse(hfp.getProtectedDirectories().contains("/dird1/dird2")); + + } + + /* + * Test if spaces , tabs and new lines are allowed + */ + @Test + public void testProtectedDirConfigFileReaderWithTabs() throws Exception { + FileWriter cfw = new FileWriter(configFile); + + cfw.write("#PROTECTED-DIRS-LIST\n"); + cfw.write(" \n"); + cfw.write(" somepath \t /dirb1/dirb2/dirb3 \n /dird1/dird2"); + cfw.write(" /dirc1/dirc2/dirc3 \t # /dire1/dire2"); + cfw.close(); + + ProtectedDirsConfigReader hfp = new ProtectedDirsConfigReader(configFile); + + int dirsLen = hfp.getProtectedDirectories().size(); + + assertEquals(4, dirsLen); + assertTrue(hfp.getProtectedDirectories().contains("/dirb1/dirb2/dirb3")); + assertFalse(hfp.getProtectedDirectories().contains("/dire1/dire2")); + + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java index a814035e0f514..3c97a8b90eb6f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestTools.java @@ -80,7 +80,7 @@ public void testDFSAdminInvalidUsageHelp() { "-rollEdits", "-restoreFailedStorage", "-refreshNodes", "-finalizeUpgrade", "-metasave", "-refreshUserToGroupsMappings", "-printTopology", "-refreshNamenodes", "-deleteBlockPool", - "-setBalancerBandwidth", "-fetchImage"); + "-setBalancerBandwidth", "-refreshProtectedDirectories", "-fetchImage"); try { for (String arg : args) assertTrue(ToolRunner.run(new DFSAdmin(), fillArgs(arg)) == -1);
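Usage note (illustrative, not part of the patch): once the protected-directories setting has been edited in the NameNode's configuration, an administrator can trigger the reload either with the new CLI command, hdfs dfsadmin -refreshProtectedDirectories, or programmatically through the DistributedFileSystem method added above. The sketch below uses a hypothetical NameNode URI and class name; the refresh requires superuser privilege on the NameNode.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class RefreshProtectedDirsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode address; use the cluster's actual fs.defaultFS.
    try (FileSystem fs = FileSystem.get(
        URI.create("hdfs://nn.example.com:8020"), conf)) {
      if (fs instanceof DistributedFileSystem) {
        // Asks the NameNode to re-read its protected directories (including
        // any file:// entries) from its configuration files.
        ((DistributedFileSystem) fs).refreshProtectedDirectories();
        System.out.println("Protected directories refreshed");
      }
    }
  }
}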