|
25 | 25 | import org.apache.hadoop.fs.FileSystem; |
26 | 26 | import org.apache.hadoop.fs.FileSystemTestHelper; |
27 | 27 | import org.apache.hadoop.fs.Path; |
28 | | -import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient; |
29 | 28 | import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer; |
30 | 29 | import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase; |
31 | 30 | import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; |
@@ -251,55 +250,33 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception { |
251 | 250 | clientConf.set(HADOOP_RPC_PROTECTION, "privacy"); |
252 | 251 | FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf); |
253 | 252 | doTest(fsPrivacy, PATH1); |
254 | | - for (int i = 0; i < 2; i++) { |
255 | | - DataNode dn = dataNodes.get(i); |
256 | | - SaslDataTransferClient saslClient = dn.getSaslClient(); |
257 | | - String qop = null; |
258 | | - // It may take some time for the qop to populate |
259 | | - // to all DNs, check in a loop. |
260 | | - for (int trial = 0; trial < 10; trial++) { |
261 | | - qop = saslClient.getTargetQOP(); |
262 | | - if (qop != null) { |
263 | | - break; |
264 | | - } |
265 | | - Thread.sleep(100); |
266 | | - } |
267 | | - assertEquals("auth", qop); |
268 | | - } |
| 253 | + long count = dataNodes.stream() |
| 254 | + .map(dn -> dn.getSaslClient().getTargetQOP()) |
| 255 | + .filter("auth"::equals) |
| 256 | + .count(); |
| 257 | + // For each write pipeline, the targetQOP of the SASL clients in the |
| 258 | + // first two datanodes becomes "auth". |
| 259 | + // Note that this does not necessarily hold for all datanodes, |
| 260 | + // since a datanode may always be at the last position in pipelines. |
| 261 | + assertTrue("At least two qops should be auth", count >= 2); |
269 | 262 |
|
270 | 263 | clientConf.set(HADOOP_RPC_PROTECTION, "integrity"); |
271 | 264 | FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf); |
272 | 265 | doTest(fsIntegrity, PATH2); |
273 | | - for (int i = 0; i < 2; i++) { |
274 | | - DataNode dn = dataNodes.get(i); |
275 | | - SaslDataTransferClient saslClient = dn.getSaslClient(); |
276 | | - String qop = null; |
277 | | - for (int trial = 0; trial < 10; trial++) { |
278 | | - qop = saslClient.getTargetQOP(); |
279 | | - if (qop != null) { |
280 | | - break; |
281 | | - } |
282 | | - Thread.sleep(100); |
283 | | - } |
284 | | - assertEquals("auth", qop); |
285 | | - } |
| 266 | + count = dataNodes.stream() |
| 267 | + .map(dn -> dn.getSaslClient().getTargetQOP()) |
| 268 | + .filter("auth"::equals) |
| 269 | + .count(); |
| 270 | + assertTrue("At least two qops should be auth", count >= 2); |
286 | 271 |
|
287 | 272 | clientConf.set(HADOOP_RPC_PROTECTION, "authentication"); |
288 | 273 | FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf); |
289 | 274 | doTest(fsAuth, PATH3); |
290 | | - for (int i = 0; i < 3; i++) { |
291 | | - DataNode dn = dataNodes.get(i); |
292 | | - SaslDataTransferServer saslServer = dn.getSaslServer(); |
293 | | - String qop = null; |
294 | | - for (int trial = 0; trial < 10; trial++) { |
295 | | - qop = saslServer.getNegotiatedQOP(); |
296 | | - if (qop != null) { |
297 | | - break; |
298 | | - } |
299 | | - Thread.sleep(100); |
300 | | - } |
301 | | - assertEquals("auth", qop); |
302 | | - } |
| 275 | + count = dataNodes.stream() |
| 276 | + .map(dn -> dn.getSaslServer().getNegotiatedQOP()) |
| 277 | + .filter("auth"::equals) |
| 278 | + .count(); |
| 279 | + assertEquals("All qops should be auth", 3, count); |
303 | 280 | } finally { |
304 | 281 | if (cluster != null) { |
305 | 282 | cluster.shutdown(); |
|
0 commit comments