+ * to hold a hostname and port combo). This class is also {@link Comparable}
* In implementation this class is a facade over Guava's {@link HostAndPort}.
* We cannot have Guava classes in our API hence this Type.
*/
@@ -83,7 +83,10 @@ public int hashCode() {
@Override
public int compareTo(Address that) {
int compare = this.getHostname().compareTo(that.getHostname());
- if (compare != 0) return compare;
+ if (compare != 0) {
+ return compare;
+ }
+
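+ // Subtraction is safe here assuming valid port values (0-65535); it cannot overflow int.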
return this.getPort() - that.getPort();
}
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index 4818efc1e9e1..63241c03b522 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -43,7 +43,7 @@ public class SpanReceiverHost {
private boolean closed = false;
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="SE_BAD_FIELD")
- private static enum SingletonHolder {
+ private enum SingletonHolder {
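+ // Nested enum types are implicitly static, so the explicit modifier was redundant.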
INSTANCE;
final Object lock = new Object();
SpanReceiverHost host = null; // FindBugs: SE_BAD_FIELD
@@ -64,14 +64,13 @@ public static SpanReceiverHost getInstance(Configuration conf) {
}
SpanReceiverHost(Configuration conf) {
- receivers = new HashSet();
+ receivers = new HashSet<>();
this.conf = conf;
}
/**
* Reads the names of classes specified in the {@code hbase.trace.spanreceiver.classes} property
* and instantiates and registers them with the Tracer.
- *
*/
public void loadSpanReceivers() {
String[] receiverNames = conf.getStrings(SPAN_RECEIVERS_CONF_KEY);
@@ -98,7 +97,10 @@ public void loadSpanReceivers() {
* Calls close() on all SpanReceivers created by this SpanReceiverHost.
*/
public synchronized void closeReceivers() {
- if (closed) return;
+ if (closed) {
+ return;
+ }
+
closed = true;
for (SpanReceiver rcvr : receivers) {
try {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
index 7de1b1375a8b..f33e3ab4fdd3 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteRangeUtils.java
@@ -15,9 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.hbase.util;
+import com.google.common.collect.Lists;
+
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
@@ -26,19 +27,18 @@
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
-import com.google.common.collect.Lists;
-
/**
* Utility methods for working with {@link ByteRange}.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ByteRangeUtils {
-
public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int rightInnerOffset) {
int maxCompares = Math.min(left.getLength(), right.getLength() - rightInnerOffset);
- final byte[] lbytes = left.getBytes(), rbytes = right.getBytes();
- final int loffset = left.getOffset(), roffset = right.getOffset();
+ final byte[] lbytes = left.getBytes();
+ final byte[] rbytes = right.getBytes();
+ final int loffset = left.getOffset();
+ final int roffset = right.getOffset();
for (int i = 0; i < maxCompares; ++i) {
if (lbytes[loffset + i] != rbytes[roffset + rightInnerOffset + i]) {
return i;
@@ -49,7 +49,7 @@ public static int numEqualPrefixBytes(ByteRange left, ByteRange right, int right
public static ArrayList<byte[]> copyToNewArrays(Collection<ByteRange> ranges) {
if (ranges == null) {
- return new ArrayList(0);
+ return new ArrayList<>(0);
}
ArrayList<byte[]> arrays = Lists.newArrayListWithCapacity(ranges.size());
for (ByteRange range : ranges) {
@@ -60,7 +60,7 @@ public static ArrayList copyToNewArrays(Collection ranges) {
public static ArrayList<ByteRange> fromArrays(Collection<byte[]> arrays) {
if (arrays == null) {
- return new ArrayList(0);
+ return new ArrayList<>(0);
}
ArrayList<ByteRange> ranges = Lists.newArrayListWithCapacity(arrays.size());
for (byte[] array : arrays) {
@@ -78,5 +78,4 @@ public static void write(OutputStream os, ByteRange byteRange, int byteRangeInne
os.write(byteRange.getBytes(), byteRange.getOffset() + byteRangeInnerOffset,
byteRange.getLength() - byteRangeInnerOffset);
}
-
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
index b52d3dc78b0f..ef6d48908a91 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/CommonFSUtils.java
@@ -44,10 +44,10 @@
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
+import org.apache.hadoop.ipc.RemoteException;
/**
* Utility methods for interacting with the underlying file system.
@@ -137,8 +137,7 @@ public static boolean isMatchingTail(final Path pathToSearch, final Path pathTai
* @return True if deleted dir
* @throws IOException e
*/
- public static boolean deleteDirectory(final FileSystem fs, final Path dir)
- throws IOException {
+ public static boolean deleteDirectory(final FileSystem fs, final Path dir) throws IOException {
return fs.exists(dir) && fs.delete(dir, true);
}
@@ -146,69 +145,22 @@ public static boolean deleteDirectory(final FileSystem fs, final Path dir)
* Return the number of bytes that large input files should optimally
* be split into to minimize i/o time.
*
- * use reflection to search for getDefaultBlockSize(Path f)
- * if the method doesn't exist, fall back to using getDefaultBlockSize()
- *
* @param fs filesystem object
* @return the default block size for the path's filesystem
- * @throws IOException e
*/
- public static long getDefaultBlockSize(final FileSystem fs, final Path path) throws IOException {
- Method m = null;
- Class<? extends FileSystem> cls = fs.getClass();
- try {
- m = cls.getMethod("getDefaultBlockSize", new Class>[] { Path.class });
- } catch (NoSuchMethodException e) {
- LOG.info("FileSystem doesn't support getDefaultBlockSize");
- } catch (SecurityException e) {
- LOG.info("Doesn't have access to getDefaultBlockSize on FileSystems", e);
- m = null; // could happen on setAccessible()
- }
- if (m == null) {
- return fs.getDefaultBlockSize(path);
- } else {
- try {
- Object ret = m.invoke(fs, path);
- return ((Long)ret).longValue();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
+ public static long getDefaultBlockSize(final FileSystem fs, final Path path) {
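+ // Supported Hadoop releases provide getDefaultBlockSize(Path) directly, so the reflective lookup is no longer needed.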
+ return fs.getDefaultBlockSize(path);
}
/*
* Get the default replication.
*
- * use reflection to search for getDefaultReplication(Path f)
- * if the method doesn't exist, fall back to using getDefaultReplication()
- *
* @param fs filesystem object
* @param path path of file
* @return default replication for the path's filesystem
- * @throws IOException e
*/
- public static short getDefaultReplication(final FileSystem fs, final Path path)
- throws IOException {
- Method m = null;
- Class<? extends FileSystem> cls = fs.getClass();
- try {
- m = cls.getMethod("getDefaultReplication", new Class>[] { Path.class });
- } catch (NoSuchMethodException e) {
- LOG.info("FileSystem doesn't support getDefaultReplication");
- } catch (SecurityException e) {
- LOG.info("Doesn't have access to getDefaultReplication on FileSystems", e);
- m = null; // could happen on setAccessible()
- }
- if (m == null) {
- return fs.getDefaultReplication(path);
- } else {
- try {
- Object ret = m.invoke(fs, path);
- return ((Number)ret).shortValue();
- } catch (Exception e) {
- throw new IOException(e);
- }
- }
+ public static short getDefaultReplication(final FileSystem fs, final Path path) {
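+ // Likewise, getDefaultReplication(Path) is part of the FileSystem API in supported Hadoop releases.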
+ return fs.getDefaultReplication(path);
}
/**
@@ -358,11 +310,11 @@ public static Path getRootDir(final Configuration c) throws IOException {
return p.makeQualified(fs);
}
- public static void setRootDir(final Configuration c, final Path root) throws IOException {
+ public static void setRootDir(final Configuration c, final Path root) {
c.set(HConstants.HBASE_DIR, root.toString());
}
- public static void setFsDefault(final Configuration c, final Path root) throws IOException {
+ public static void setFsDefault(final Configuration c, final Path root) {
c.set("fs.defaultFS", root.toString()); // for hadoop 0.21+
}
@@ -387,7 +339,7 @@ public static Path getWALRootDir(final Configuration c) throws IOException {
}
@VisibleForTesting
- public static void setWALRootDir(final Configuration c, final Path root) throws IOException {
+ public static void setWALRootDir(final Configuration c, final Path root) {
c.set(HBASE_WAL_DIR, root.toString());
}
@@ -481,8 +433,7 @@ public static void setStoragePolicy(final FileSystem fs, final Configuration con
setStoragePolicy(fs, path, storagePolicy);
}
- private static final Map<FileSystem, Boolean> warningMap =
- new ConcurrentHashMap<FileSystem, Boolean>();
+ private static final Map<FileSystem, Boolean> warningMap = new ConcurrentHashMap<>();
/**
* Sets storage policy for given path.
@@ -557,71 +508,43 @@ static void setStoragePolicy(final FileSystem fs, final Path path, final String
*/
private static void invokeSetStoragePolicy(final FileSystem fs, final Path path,
final String storagePolicy) throws IOException {
- Method m = null;
Exception toThrow = null;
+
try {
- m = fs.getClass().getDeclaredMethod("setStoragePolicy",
- new Class<?>[] { Path.class, String.class });
- m.setAccessible(true);
- } catch (NoSuchMethodException e) {
- toThrow = e;
- final String msg = "FileSystem doesn't support setStoragePolicy; HDFS-6584 not available";
- if (!warningMap.containsKey(fs)) {
- warningMap.put(fs, true);
- LOG.warn(msg, e);
- } else if (LOG.isDebugEnabled()) {
- LOG.debug(msg, e);
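+ // FileSystem#setStoragePolicy (HDFS-6584) can be called directly now, instead of via reflection.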
+ fs.setStoragePolicy(path, storagePolicy);
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
}
- m = null;
- } catch (SecurityException e) {
+ } catch (Exception e) {
toThrow = e;
- final String msg = "No access to setStoragePolicy on FileSystem; HDFS-6584 not available";
+ // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
+ // misuse than a runtime problem with HDFS.
if (!warningMap.containsKey(fs)) {
warningMap.put(fs, true);
- LOG.warn(msg, e);
+ LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
} else if (LOG.isDebugEnabled()) {
- LOG.debug(msg, e);
+ LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
}
- m = null; // could happen on setAccessible()
- }
- if (m != null) {
- try {
- m.invoke(fs, path, storagePolicy);
+
+ // check for lack of HDFS-7228
+ if (e instanceof RemoteException &&
+ HadoopIllegalArgumentException.class.getName().equals(
+ ((RemoteException)e).getClassName())) {
if (LOG.isDebugEnabled()) {
- LOG.debug("Set storagePolicy=" + storagePolicy + " for path=" + path);
- }
- } catch (Exception e) {
- toThrow = e;
- // This swallows FNFE, should we be throwing it? seems more likely to indicate dev
- // misuse than a runtime problem with HDFS.
- if (!warningMap.containsKey(fs)) {
- warningMap.put(fs, true);
- LOG.warn("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
- } else if (LOG.isDebugEnabled()) {
- LOG.debug("Unable to set storagePolicy=" + storagePolicy + " for path=" + path, e);
- }
- // check for lack of HDFS-7228
- if (e instanceof InvocationTargetException) {
- final Throwable exception = e.getCause();
- if (exception instanceof RemoteException &&
- HadoopIllegalArgumentException.class.getName().equals(
- ((RemoteException)exception).getClassName())) {
- if (LOG.isDebugEnabled()) {
- LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
- "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
- "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
- "more information see the 'ArchivalStorage' docs for your Hadoop release.");
- }
- }
+ LOG.debug("Given storage policy, '" +storagePolicy +"', was rejected and probably " +
+ "isn't a valid policy for the version of Hadoop you're running. I.e. if you're " +
+ "trying to use SSD related policies then you're likely missing HDFS-7228. For " +
+ "more information see the 'ArchivalStorage' docs for your Hadoop release.");
}
}
}
+
if (toThrow != null) {
throw new IOException(toThrow);
}
}
-
/**
* @param conf must not be null
* @return True if this filesystem whose scheme is 'hdfs'.
@@ -647,8 +570,7 @@ public static boolean isRecoveredEdits(Path path) {
* @return Returns the filesystem of the hbase rootdir.
* @throws IOException from underlying FileSystem
*/
- public static FileSystem getCurrentFileSystem(Configuration conf)
- throws IOException {
+ public static FileSystem getCurrentFileSystem(Configuration conf) throws IOException {
return getRootDir(conf).getFileSystem(conf);
}
@@ -666,7 +588,7 @@ public static FileSystem getCurrentFileSystem(Configuration conf)
* @param filter path filter
* @return null if dir is empty or doesn't exist, otherwise FileStatus array
*/
- public static FileStatus [] listStatus(final FileSystem fs,
+ public static FileStatus[] listStatus(final FileSystem fs,
final Path dir, final PathFilter filter) throws IOException {
FileStatus [] status = null;
try {
@@ -753,13 +675,13 @@ public static boolean isExists(final FileSystem fs, final Path path) throws IOEx
* Log the current state of the filesystem from a certain root directory
* @param fs filesystem to investigate
* @param root root file/directory to start logging from
- * @param LOG log to output information
+ * @param log log to output information
* @throws IOException if an unexpected exception occurs
*/
- public static void logFileSystemState(final FileSystem fs, final Path root, Log LOG)
+ public static void logFileSystemState(final FileSystem fs, final Path root, Log log)
throws IOException {
- LOG.debug("Current file system:");
- logFSTree(LOG, fs, root, "|-");
+ log.debug("Current file system:");
+ logFSTree(log, fs, root, "|-");
}
/**
@@ -767,7 +689,7 @@ public static void logFileSystemState(final FileSystem fs, final Path root, Log
*
* @see #logFileSystemState(FileSystem, Path, Log)
*/
- private static void logFSTree(Log LOG, final FileSystem fs, final Path root, String prefix)
+ private static void logFSTree(Log log, final FileSystem fs, final Path root, String prefix)
throws IOException {
FileStatus[] files = listStatus(fs, root, null);
if (files == null) {
@@ -776,10 +698,10 @@ private static void logFSTree(Log LOG, final FileSystem fs, final Path root, Str
for (FileStatus file : files) {
if (file.isDirectory()) {
- LOG.debug(prefix + file.getPath().getName() + "/");
- logFSTree(LOG, fs, file.getPath(), prefix + "---");
+ log.debug(prefix + file.getPath().getName() + "/");
+ logFSTree(log, fs, file.getPath(), prefix + "---");
} else {
- LOG.debug(prefix + file.getPath().getName());
+ log.debug(prefix + file.getPath().getName());
}
}
}
@@ -791,25 +713,6 @@ public static boolean renameAndSetModifyTime(final FileSystem fs, final Path src
return fs.rename(src, dest);
}
- /**
- * Do our short circuit read setup.
- * Checks buffer size to use and whether to do checksumming in hbase or hdfs.
- * @param conf must not be null
- */
- public static void setupShortCircuitRead(final Configuration conf) {
- // Check that the user has not set the "dfs.client.read.shortcircuit.skip.checksum" property.
- boolean shortCircuitSkipChecksum =
- conf.getBoolean("dfs.client.read.shortcircuit.skip.checksum", false);
- boolean useHBaseChecksum = conf.getBoolean(HConstants.HBASE_CHECKSUM_VERIFICATION, true);
- if (shortCircuitSkipChecksum) {
- LOG.warn("Configuration \"dfs.client.read.shortcircuit.skip.checksum\" should not " +
- "be set to true." + (useHBaseChecksum ? " HBase checksum doesn't require " +
- "it, see https://issues.apache.org/jira/browse/HBASE-6868." : ""));
- assert !shortCircuitSkipChecksum; //this will fail if assertions are on
- }
- checkShortCircuitReadBufferSize(conf);
- }
-
/**
* Check if short circuit read buffer size is set and if not, set it to hbase value.
* @param conf must not be null
@@ -912,5 +815,4 @@ public StreamLacksCapabilityException(String message) {
super(message);
}
}
-
}
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
index f6fb4b908be3..93c29f90ca66 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcatenatedLists.java
@@ -79,7 +79,10 @@ public T next() {
if (!components.isEmpty()) {
this.nextWasCalled = true;
List src = components.get(currentComponent);
- if (++indexWithinComponent < src.size()) return src.get(indexWithinComponent);
+ if (++indexWithinComponent < src.size()) {
+ return src.get(indexWithinComponent);
+ }
+
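+ // The current component is exhausted; fall through and advance to the next one.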
if (++currentComponent < components.size()) {
indexWithinComponent = 0;
src = components.get(currentComponent);
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
index 9cbbe7e7c1f0..57d0aedacf71 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Order.java
@@ -29,22 +29,31 @@
@InterfaceAudience.Public
@InterfaceStability.Evolving
public enum Order {
-
ASCENDING {
@Override
- public int cmp(int cmp) { /* noop */ return cmp; }
+ public int cmp(int cmp) {
+ /* noop */ return cmp;
+ }
@Override
- public byte apply(byte val) { /* noop */ return val; }
+ public byte apply(byte val) {
+ /* noop */ return val;
+ }
@Override
- public void apply(byte[] val) { /* noop */ }
+ public void apply(byte[] val) {
+ /* noop */
+ }
@Override
- public void apply(byte[] val, int offset, int length) { /* noop */ }
+ public void apply(byte[] val, int offset, int length) {
+ /* noop */
+ }
@Override
- public String toString() { return "ASCENDING"; }
+ public String toString() {
+ return "ASCENDING";
+ }
},
DESCENDING {
@@ -55,23 +64,33 @@ public void apply(byte[] val, int offset, int length) { /* noop */ }
private static final byte MASK = (byte) 0xff;
@Override
- public int cmp(int cmp) { return -1 * cmp; }
+ public int cmp(int cmp) {
+ return -1 * cmp;
+ }
@Override
- public byte apply(byte val) { return (byte) (val ^ MASK); }
+ public byte apply(byte val) {
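+ // XOR with 0xff flips every bit, inverting the unsigned byte ordering.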
+ return (byte) (val ^ MASK);
+ }
@Override
public void apply(byte[] val) {
- for (int i = 0; i < val.length; i++) { val[i] ^= MASK; }
+ for (int i = 0; i < val.length; i++) {
+ val[i] ^= MASK;
+ }
}
@Override
public void apply(byte[] val, int offset, int length) {
- for (int i = 0; i < length; i++) { val[offset + i] ^= MASK; }
+ for (int i = 0; i < length; i++) {
+ val[offset + i] ^= MASK;
+ }
}
@Override
- public String toString() { return "DESCENDING"; }
+ public String toString() {
+ return "DESCENDING";
+ }
};
/**