diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 6de026b9d17c0..5cc8f0accc1d7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -38,9 +38,9 @@ abstract public class FSOutputSummer extends OutputStream implements
   // data checksum
   private final DataChecksum sum;
   // internal buffer for storing data before it is checksumed
-  private byte buf[];
+  private byte[] buf;
   // internal buffer for storing checksum
-  private byte checksum[];
+  private byte[] checksum;
   // The number of valid bytes in the buffer.
   private int count;
 
@@ -100,7 +100,7 @@ public synchronized void write(int b) throws IOException {
    * @exception IOException if an I/O error occurs.
    */
   @Override
-  public synchronized void write(byte b[], int off, int len)
+  public synchronized void write(byte[] b, int off, int len)
       throws IOException {
     checkClosed();
 
@@ -117,7 +117,7 @@ public synchronized void write(byte b[], int off, int len)
    * Write a portion of an array, flushing to the underlying
    * stream at most once if necessary.
    */
-  private int write1(byte b[], int off, int len) throws IOException {
+  private int write1(byte[] b, int off, int len) throws IOException {
     if(count==0 && len>=buf.length) {
       // local buffer is empty and user buffer size >= local buffer size, so
       // simply checksum the user buffer and send it directly to the underlying
@@ -129,7 +129,7 @@ private int write1(byte b[], int off, int len) throws IOException {
 
     // copy user data to local buffer
     int bytesToCopy = buf.length-count;
-    bytesToCopy = (len(flag), createParent, replication,
@@ -301,7 +299,6 @@ static DFSOutputStream newStreamForCreate(DFSClient dfsClient, String src,
             UnknownCryptoProtocolVersionException.class);
         if (e instanceof RetryStartFileException) {
           if (retryCount > 0) {
-            shouldRetry = true;
             retryCount--;
           } else {
             throw new IOException("Too many retries because of encryption" +
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 0f60027269d8f..3b7af3fe12364 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -62,7 +62,6 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutionException;
@@ -97,7 +96,7 @@ static class MultipleBlockingQueue {
     MultipleBlockingQueue(int numQueue, int queueSize) {
       queues = new ArrayList<>(numQueue);
       for (int i = 0; i < numQueue; i++) {
-        queues.add(new LinkedBlockingQueue(queueSize));
+        queues.add(new LinkedBlockingQueue<>(queueSize));
       }
     }
 
@@ -480,7 +479,7 @@ private DatanodeInfo[] getExcludedNodes() {
         }
       }
     }
-    return excluded.toArray(new DatanodeInfo[excluded.size()]);
+    return excluded.toArray(DatanodeInfo.EMPTY_ARRAY);
   }
 
   private void allocateNewBlock() throws IOException {
@@ -1316,12 +1315,9 @@ void flushAllInternals() throws IOException {
           // flush all data to Datanode
           final long toWaitFor = flushInternalWithoutWaitingAck();
           future = flushAllExecutorCompletionService.submit(
-              new Callable() {
-                @Override
-                public Void call() throws Exception {
-                  s.waitForAckedSeqno(toWaitFor);
-                  return null;
-                }
+              () -> {
+                s.waitForAckedSeqno(toWaitFor);
+                return null;
               });
           flushAllFuturesMap.put(future, i);
         } catch (Exception e) {