TestAddBlock.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;

-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;

import java.io.IOException;
import java.util.EnumSet;
@@ -33,9 +33,9 @@
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;

/**
* Test AddBlockOp is written and read correctly
@@ -47,7 +47,7 @@ public class TestAddBlock {
private MiniDFSCluster cluster;
private Configuration conf;

-@Before
+@BeforeEach
public void setup() throws IOException {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
@@ -56,7 +56,7 @@ public void setup() throws IOException {
cluster.waitActive();
}

-@After
+@AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -147,8 +147,7 @@ public void testAddBlockUC() throws Exception {
assertEquals(BLOCKSIZE, fileBlocks[0].getNumBytes());
assertEquals(BlockUCState.COMPLETE, fileBlocks[0].getBlockUCState());
assertEquals(appendContent.length() - 1, fileBlocks[1].getNumBytes());
-assertEquals(BlockUCState.UNDER_CONSTRUCTION,
-fileBlocks[1].getBlockUCState());
+assertEquals(BlockUCState.UNDER_CONSTRUCTION, fileBlocks[1].getBlockUCState());
} finally {
if (out != null) {
out.close();
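Note on the pattern above, which repeats in every file below: the JUnit 4 to JUnit 5 migration is mechanical. Lifecycle annotations map one-to-one (@Before -> @BeforeEach, @After -> @AfterEach, @Ignore -> @Disabled), static assertion imports move from org.junit.Assert to org.junit.jupiter.api.Assertions, and the optional failure message moves from the first argument to the last. A minimal sketch of the convention; the class and method names are illustrative, not part of this change:

import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;

// Hypothetical test class, for illustration only.
class ExampleMigratedTest {

  @BeforeEach   // JUnit 4: @Before
  void setup() { }

  @AfterEach    // JUnit 4: @After
  void tearDown() { }

  @Test
  void messageIsNowTheLastArgument() {
    // JUnit 4: assertEquals("Must be one block", 1, blockCount());
    // JUnit 5 takes the optional failure message as the trailing parameter.
    assertEquals(1, blockCount(), "Must be one block");
  }

  private int blockCount() { return 1; }
}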
TestAddBlockRetry.java
@@ -19,9 +19,9 @@
package org.apache.hadoop.hdfs.server.namenode;


-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.EnumSet;

@@ -38,9 +38,9 @@
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.util.RwLockMode;
import org.apache.hadoop.io.EnumSetWritable;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;

/**
@@ -56,7 +56,7 @@ public class TestAddBlockRetry {
private Configuration conf;
private MiniDFSCluster cluster;

-@Before
+@BeforeEach
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
@@ -65,7 +65,7 @@ public void setUp() throws Exception {
cluster.waitActive();
}

-@After
+@AfterEach
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
@@ -105,18 +105,17 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
}
DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
ns.getBlockManager(), src, null, null, null, r);
assertNotNull("Targets must be generated", targets);
assertNotNull(targets, "Targets must be generated");

// run second addBlock()
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertTrue("Penultimate block must be complete",
checkFileProgress(src, false));
assertTrue(checkFileProgress(src, false), "Penultimate block must be complete");
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
LocatedBlock lb2 = lbs.get(0);
assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
assertEquals(REPLICATION, lb2.getLocations().length, "Wrong replication");

// continue first addBlock()
ns.writeLock(RwLockMode.GLOBAL);
@@ -127,14 +126,14 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
} finally {
ns.writeUnlock(RwLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
}
assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
assertEquals(lb2.getBlock(), newBlock.getBlock(), "Blocks are not equal");

// check locations
lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
assertEquals(1, lbs.getLocatedBlocks().size(), "Must be one block");
LocatedBlock lb1 = lbs.get(0);
assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
assertEquals(REPLICATION, lb1.getLocations().length, "Wrong replication");
assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
}

boolean checkFileProgress(String src, boolean checkall) throws IOException {
@@ -165,14 +164,14 @@ public void testAddBlockRetryShouldReturnBlockWithLocations()
LOG.info("Starting first addBlock for " + src);
LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertTrue("Block locations should be present",
lb1.getLocations().length > 0);
assertTrue(lb1.getLocations().length > 0,
"Block locations should be present");

cluster.restartNameNode();
nameNodeRpc = cluster.getNameNodeRpc();
LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null, null);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
assertEquals(lb1.getBlock(), lb2.getBlock(), "Blocks are not equal");
assertTrue(lb2.getLocations().length > 0, "Wrong locations with retry");
}
}
TestAddOverReplicatedStripedBlocks.java
@@ -34,22 +34,22 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.hdfs.util.RwLockMode;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Ignore;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Disabled;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

import java.io.IOException;
import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;

+@Timeout(300)
public class TestAddOverReplicatedStripedBlocks {

private MiniDFSCluster cluster;
@@ -66,10 +66,7 @@ public class TestAddOverReplicatedStripedBlocks {
private final int blockSize = stripesPerBlock * cellSize;
private final int numDNs = groupSize + 3;

-@Rule
-public Timeout globalTimeout = new Timeout(300000);
-
-@Before
+@BeforeEach
public void setup() throws IOException {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
@@ -87,7 +84,7 @@ public void setup() throws IOException {
ecPolicy.getName());
}

-@After
+@AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -238,15 +235,15 @@ public void testProcessOverReplicatedAndCorruptStripedBlock()
for (byte index : bg.getBlockIndices()) {
set.set(index);
}
-Assert.assertFalse(set.get(0));
+assertFalse(set.get(0));
for (int i = 1; i < groupSize; i++) {
assertTrue(set.get(i));
}
}

// This test is going to be rewritten in HDFS-10854. Ignoring this test
// temporarily as it fails with the fix for HDFS-10301.
-@Ignore
+@Disabled
@Test
public void testProcessOverReplicatedAndMissingStripedBlock()
throws Exception {
@@ -295,7 +292,7 @@ public void testProcessOverReplicatedAndMissingStripedBlock()
for (byte index : bg.getBlockIndices()) {
set.set(index);
}
-Assert.assertFalse(set.get(groupSize - 1));
+assertFalse(set.get(groupSize - 1));
for (int i = 0; i < groupSize - 1; i++) {
assertTrue(set.get(i));
}
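One unit change worth flagging in the timeout migration above: the JUnit 4 rule is constructed with milliseconds (new Timeout(300000)), while JUnit 5's @Timeout annotation defaults to seconds, hence the class-level @Timeout(300). A sketch under that assumption; class and method names are illustrative:

import java.util.concurrent.TimeUnit;

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

// JUnit 4 form being replaced:
//   @Rule
//   public Timeout globalTimeout = new Timeout(300000); // milliseconds

@Timeout(300) // seconds by default; applies to every test method in the class
class ExampleTimeoutTest { // hypothetical class, for illustration only

  @Test
  @Timeout(value = 300_000, unit = TimeUnit.MILLISECONDS) // explicit unit override
  void canOverrideThePerClassTimeout() { }
}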
TestAddStripedBlockInFBR.java
@@ -33,17 +33,17 @@
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.Whitebox;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;
import org.mockito.Mockito;

import java.util.function.Supplier;

import java.io.IOException;

+@Timeout(300)
public class TestAddStripedBlockInFBR {
private final ErasureCodingPolicy ecPolicy =
StripedFileTestUtil.getDefaultECPolicy();
@@ -55,10 +55,7 @@ public class TestAddStripedBlockInFBR {
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;

-@Rule
-public Timeout globalTimeout = new Timeout(300000);
-
-@Before
+@BeforeEach
public void setup() throws IOException {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
@@ -68,7 +65,7 @@ public void setup() throws IOException {
StripedFileTestUtil.getDefaultECPolicy().getName());
}

-@After
+@AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
TestAddStripedBlocks.java
@@ -51,23 +51,23 @@
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.io.IOUtils;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Rule;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Timeout;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;

+@Timeout(300)
public class TestAddStripedBlocks {
private final ErasureCodingPolicy ecPolicy =
StripedFileTestUtil.getDefaultECPolicy();
@@ -80,10 +80,7 @@ public class TestAddStripedBlocks {
private MiniDFSCluster cluster;
private DistributedFileSystem dfs;

-@Rule
-public Timeout globalTimeout = new Timeout(300000);
-
-@Before
+@BeforeEach
public void setup() throws IOException {
HdfsConfiguration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(groupSize).build();
@@ -93,7 +90,7 @@ public void setup() throws IOException {
dfs.getClient().setErasureCodingPolicy("/", ecPolicy.getName());
}

-@After
+@AfterEach
public void tearDown() {
if (cluster != null) {
cluster.shutdown();
@@ -163,7 +160,8 @@ private static void writeAndFlushStripedOutputStream(
DFSTestUtil.flushInternal(out);
}

-@Test (timeout=60000)
+@Test
+@Timeout(value = 60)
public void testAddStripedBlock() throws Exception {
final Path file = new Path("/file1");
// create an empty file
@@ -485,17 +483,17 @@ public void testStripedFlagInBlockLocation() throws IOException {
out.write("this is a replicated file".getBytes());
}
BlockLocation[] locations = dfs.getFileBlockLocations(replicated, 0, 100);
assertEquals("There should be exactly one Block present",
1, locations.length);
assertFalse("The file is Striped", locations[0].isStriped());
assertEquals(1, locations.length,
"There should be exactly one Block present");
assertFalse(locations[0].isStriped(), "The file is Striped");

Path striped = new Path("/blockLocation/striped");
try (FSDataOutputStream out = dfs.createFile(striped).recursive().build()) {
out.write("this is a striped file".getBytes());
}
locations = dfs.getFileBlockLocations(striped, 0, 100);
assertEquals("There should be exactly one Block present",
1, locations.length);
assertTrue("The file is not Striped", locations[0].isStriped());
assertEquals(1, locations.length,
"There should be exactly one Block present");
assertTrue(locations[0].isStriped(), "The file is not Striped");
}
}
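The per-method change in this file follows the same convention: JUnit 4's @Test(timeout=60000) takes milliseconds as an attribute of @Test, while JUnit 5 splits the timeout into a separate @Timeout annotation that defaults to seconds, giving @Test plus @Timeout(value = 60). A minimal sketch, names illustrative:

import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;

class ExamplePerMethodTimeoutTest { // hypothetical class, for illustration only

  // JUnit 4 equivalent: @Test(timeout = 60000) // milliseconds
  @Test
  @Timeout(value = 60) // seconds by default in JUnit 5
  void finishesWithinAMinute() throws Exception {
    // test body elided
  }
}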
TestAuditLogAtDebug.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
+import org.junit.jupiter.api.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -27,9 +28,7 @@
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.FSNamesystemAuditLogger;
import org.apache.hadoop.test.GenericTestUtils;

-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
+import org.junit.jupiter.api.Test;
import org.slf4j.event.Level;

import java.net.Inet4Address;
@@ -44,11 +43,9 @@
/**
* Test that the HDFS Audit logger respects DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST.
*/
+@Timeout(300)
public class TestAuditLogAtDebug {
static final Logger LOG = LoggerFactory.getLogger(TestAuditLogAtDebug.class);

-@Rule
-public Timeout timeout = new Timeout(300000);

private static final String DUMMY_COMMAND_1 = "dummycommand1";
private static final String DUMMY_COMMAND_2 = "dummycommand2";