diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java index 414d4ee7b49a..813c060b5200 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java @@ -101,11 +101,7 @@ public static CompletableFuture> getTableState(AsyncTable> getRegionLocation(AsyncTable metaTable, byte[] regionName) { CompletableFuture> future = new CompletableFuture<>(); @@ -127,11 +123,7 @@ public static CompletableFuture> getTableState(AsyncTable> getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] encodedRegionName) { CompletableFuture> future = new CompletableFuture<>(); @@ -176,8 +168,9 @@ private static Optional getTableState(Result r) throws IOException { } /** - * Used to get all region locations for the specific table. n * @param tableName table we're - * looking for, can be null for getting all regions + * Used to get all region locations for the specific table + * @param metaTable scanner over meta table + * @param tableName table we're looking for, can be null for getting all regions * @return the list of region locations. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -200,8 +193,9 @@ public static CompletableFuture> getTableHRegionLocations( } /** - * Used to get table regions' info and server. n * @param tableName table we're looking for, can - * be null for getting all regions + * Used to get table regions' info and server. + * @param metaTable scanner over meta table + * @param tableName table we're looking for, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. @@ -259,9 +253,11 @@ void add(Result r) { } /** - * Performs a scan of META table for given table. n * @param tableName table withing we scan - * @param type scanned part of meta - * @param visitor Visitor invoked against each row + * Performs a scan of META table for given table. + * @param metaTable scanner over meta table + * @param tableName table within we scan + * @param type scanned part of meta + * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, TableName tableName, QueryType type, final Visitor visitor) { @@ -270,11 +266,13 @@ private static CompletableFuture scanMeta(AsyncTable scanMeta(AsyncTable metaTable, byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { @@ -410,9 +408,13 @@ private static Scan getMetaScan(AsyncTable metaTable, int rowUpperLimit) { * can't deserialize the result. 
*/ private static Optional getRegionLocations(final Result r) { - if (r == null) return Optional.empty(); + if (r == null) { + return Optional.empty(); + } Optional regionInfo = getHRegionInfo(r, getRegionInfoColumn()); - if (!regionInfo.isPresent()) return Optional.empty(); + if (!regionInfo.isPresent()) { + return Optional.empty(); + } List locations = new ArrayList(1); NavigableMap> familyMap = r.getNoVersionMap(); @@ -420,15 +422,18 @@ private static Optional getRegionLocations(final Result r) { locations.add(getRegionLocation(r, regionInfo.get(), 0)); NavigableMap infoMap = familyMap.get(getCatalogFamily()); - if (infoMap == null) return Optional.of(new RegionLocations(locations)); + if (infoMap == null) { + return Optional.of(new RegionLocations(locations)); + } // iterate until all serverName columns are seen int replicaId = 0; byte[] serverColumn = getServerColumn(replicaId); - SortedMap serverMap = null; - serverMap = infoMap.tailMap(serverColumn, false); + SortedMap serverMap = infoMap.tailMap(serverColumn, false); - if (serverMap.isEmpty()) return Optional.of(new RegionLocations(locations)); + if (serverMap.isEmpty()) { + return Optional.of(new RegionLocations(locations)); + } for (Map.Entry entry : serverMap.entrySet()) { replicaId = parseReplicaIdFromServerColumn(entry.getKey()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 9b7a5de19bd3..8c675c4522e6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -51,6 +51,7 @@ public byte[] toByteArray() { } /** + * Parse the serialized representation of the {@link ClusterId} * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray() */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 7ef8a2086118..630e3620a675 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -67,11 +67,11 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m .collect(Collectors.toList())) .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setTableName(ProtobufUtil.toProtoTableName(status.getKey())) .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build()) .collect(Collectors.toList())); if (metrics.getMasterName() != null) { - builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); + builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName())); } if (metrics.getMasterTasks() != null) { builder.addAllMasterTasks(metrics.getMasterTasks().stream() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index e218437694b4..e3b1a8ab6628 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -186,6 +186,7 @@ public List getRegionStatesInTransition() { } /** 
Returns the HBase version string as reported by the HMaster */ + @Override public String getHBaseVersion() { return metrics.getHBaseVersion(); } @@ -279,6 +280,7 @@ public ServerLoad getLoad(final ServerName sn) { return serverMetrics == null ? null : new ServerLoad(serverMetrics); } + @Override public String getClusterId() { return metrics.getClusterId(); } @@ -289,6 +291,7 @@ public List getMasterCoprocessorNames() { } /** + * Get the list of master coprocessor names. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getMasterCoprocessorNames} instead. */ @@ -299,6 +302,7 @@ public String[] getMasterCoprocessors() { } /** + * Get the last major compaction time for a given table. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getLastMajorCompactionTimestamp(TableName)} instead. */ @@ -308,6 +312,7 @@ public long getLastMajorCompactionTsForTable(TableName table) { } /** + * Get the last major compaction time for a given region. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getLastMajorCompactionTimestamp(byte[])} instead. */ @@ -317,6 +322,7 @@ public long getLastMajorCompactionTsForRegion(final byte[] region) { } /** + * Returns true if the balancer is enabled. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index 94909f1c14ef..32e06d610247 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -44,8 +44,8 @@ public interface CoprocessorEnvironment { int getLoadSequence(); /** - * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to - * set a configuration. + * Returns a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to + * set a configuration. 
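The read-only contract noted above is easy to trip over in coprocessor code. A minimal sketch, assuming a CoprocessorEnvironment reference named env (hypothetical variable), of the defensive copy it implies:

    // getConfiguration() returns a read-only view; setting a key on it
    // throws UnsupportedOperationException, so copy before mutating.
    Configuration readOnly = env.getConfiguration();
    Configuration writable = new Configuration(readOnly); // independent copy
    writable.set("my.custom.key", "value");               // hypothetical key; safe on the copy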
*/ Configuration getConfiguration(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java index 3484995c1bfd..47a86f9492f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java @@ -44,10 +44,7 @@ public HBaseServerException(boolean serverOverloaded, String message) { this.serverOverloaded = serverOverloaded; } - /** - * @param t throwable to check for server overloaded state - * @return True if the server was considered overloaded when the exception was thrown - */ + /** Returns True if the server was considered overloaded when the exception was thrown */ public static boolean isServerOverloaded(Throwable t) { if (t instanceof HBaseServerException) { return ((HBaseServerException) t).isServerOverloaded(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index d55b417d4825..43640858cccd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -176,6 +176,7 @@ protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) { } /** + * Check if a given family name is allowed. * @param b Family name. * @return b * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' @@ -205,19 +206,12 @@ public String getNameAsString() { return delegatee.getNameAsString(); } - /** - * @param key The key. - * @return The value. - */ @Override public byte[] getValue(byte[] key) { return delegatee.getValue(key); } - /** - * @param key The key. - * @return The value as a string. - */ + @Override public String getValue(String key) { byte[] value = getValue(Bytes.toBytes(key)); return value == null ? null : Bytes.toString(value); @@ -228,35 +222,22 @@ public Map getValues() { return delegatee.getValues(); } - /** - * @param key The key. - * @param value The value. - * @return this (for chained invocation) - */ public HColumnDescriptor setValue(byte[] key, byte[] value) { getDelegateeForModification().setValue(key, value); return this; } - /** - * @param key Key whose key and value we're to remove from HCD parameters. - */ public void remove(final byte[] key) { getDelegateeForModification().removeValue(new Bytes(key)); } - /** - * @param key The key. - * @param value The value. - * @return this (for chained invocation) - */ public HColumnDescriptor setValue(String key, String value) { getDelegateeForModification().setValue(key, value); return this; } /** - * @return compression type being used for the column family + * Returns compression type being used for the column family * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 * (HBASE-13655). Use * {@link #getCompressionType()}. @@ -267,7 +248,7 @@ public Compression.Algorithm getCompression() { } /** - * @return compression type being used for the column family for major compaction + * Returns compression type being used for the column family for major compaction * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 * (HBASE-13655). Use * {@link #getCompactionCompressionType()}. 
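Most of the HColumnDescriptor setters touched below return this for chaining. A minimal sketch of that style (the class is itself deprecated in 2.x; the family name and values are illustrative only):

    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("cf"))
      .setMaxVersions(3)       // keep up to three versions per cell
      .setBlocksize(64 * 1024) // 64 KB hfile blocks
      .setTimeToLive(86400)    // expire cells after one day
      .setInMemory(true);      // favor keeping this family in block cache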
@@ -283,6 +264,7 @@ public int getMaxVersions() { } /** + * Set maximum versions to keep * @param value maximum number of versions * @return this (for chained invocation) */ @@ -320,6 +302,7 @@ public int getBlocksize() { } /** + * Set block size to use when writing * @param value Blocksize to use when writing out storefiles/hfiles on this column family. * @return this (for chained invocation) */ @@ -423,6 +406,7 @@ public boolean isInMemory() { } /** + * Set or clear the in memory flag. * @param value True if we are to favor keeping all values for this column family in the * HRegionServer cache * @return this (for chained invocation) @@ -438,6 +422,7 @@ public MemoryCompactionPolicy getInMemoryCompaction() { } /** + * Set the in memory compaction policy. * @param value the prefered in-memory compaction policy for this column family * @return this (for chained invocation) */ @@ -452,6 +437,7 @@ public KeepDeletedCells getKeepDeletedCells() { } /** + * Set the keep deleted cells policy. * @param value True if deleted rows should not be collected immediately. * @return this (for chained invocation) */ @@ -481,6 +467,7 @@ public int getTimeToLive() { } /** + * Set the time to live of cell contents * @param value Time-to-live of cell contents, in seconds. * @return this (for chained invocation) */ @@ -490,6 +477,7 @@ public HColumnDescriptor setTimeToLive(int value) { } /** + * Set the time to live of cell contents * @param value Time to live of cell contents, in human readable format * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit) * @return this (for chained invocation) @@ -505,6 +493,7 @@ public int getMinVersions() { } /** + * Set the minimum number of versions to keep. * @param value The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ @@ -519,6 +508,7 @@ public boolean isBlockCacheEnabled() { } /** + * Set or clear the block cache enabled flag. * @param value True if hfile DATA type blocks should be cached (We always cache INDEX and BLOOM * blocks; you cannot turn this off). * @return this (for chained invocation) @@ -534,6 +524,7 @@ public BloomType getBloomFilterType() { } /** + * Set the bloom filter type. * @param value bloom filter type * @return this (for chained invocation) */ @@ -547,10 +538,6 @@ public int getScope() { return delegatee.getScope(); } - /** - * @param value the scope tag - * @return this (for chained invocation) - */ public HColumnDescriptor setScope(int value) { getDelegateeForModification().setScope(value); return this; @@ -562,6 +549,7 @@ public boolean isCacheDataOnWrite() { } /** + * Set or clear the cache data on write flag. * @param value true if we should cache data blocks on write * @return this (for chained invocation) */ @@ -571,7 +559,7 @@ public HColumnDescriptor setCacheDataOnWrite(boolean value) { } /** - * This is a noop call from HBase 2.0 onwards + * Set or clear the cache in L1 flag. This is a noop call from HBase 2.0 onwards * @return this (for chained invocation) * @deprecated Since 2.0 and will be removed in 3.0 with out any replacement. Caching data in on * heap Cache, when there are both on heap LRU Cache and Bucket Cache will no longer @@ -588,6 +576,7 @@ public boolean isCacheIndexesOnWrite() { } /** + * Set or clear the cache indexes on write flag. 
* @param value true if we should cache index blocks on write * @return this (for chained invocation) */ @@ -602,6 +591,7 @@ public boolean isCacheBloomsOnWrite() { } /** + * Set or clear the cache bloom filters on write flag. * @param value true if we should cache bloomfilter blocks on write * @return this (for chained invocation) */ @@ -616,6 +606,7 @@ public boolean isEvictBlocksOnClose() { } /** + * Set or clear the evict bloom filters on close flag. * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ @@ -630,6 +621,7 @@ public boolean isPrefetchBlocksOnOpen() { } /** + * Set or clear the prefetch on open flag. * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ @@ -638,9 +630,6 @@ public HColumnDescriptor setPrefetchBlocksOnOpen(boolean value) { return this; } - /** - * @see java.lang.Object#toString() - */ @Override public String toString() { return delegatee.toString(); @@ -660,9 +649,6 @@ public static Map getDefaultValues() { return ColumnFamilyDescriptorBuilder.getDefaultValues(); } - /** - * @see java.lang.Object#equals(java.lang.Object) - */ @Override public boolean equals(Object obj) { if (this == obj) { @@ -674,9 +660,6 @@ public boolean equals(Object obj) { return false; } - /** - * @see java.lang.Object#hashCode() - */ @Override public int hashCode() { return delegatee.hashCode(); @@ -688,7 +671,7 @@ public int compareTo(HColumnDescriptor other) { } /** - * @return This instance serialized with pb with pb magic prefix + * Returns This instance serialized with pb with pb magic prefix * @see #parseFrom(byte[]) */ public byte[] toByteArray() { @@ -696,6 +679,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of a {@link HColumnDescriptor} * @param bytes A pb serialized {@link HColumnDescriptor} instance with pb magic prefix * @return An instance of {@link HColumnDescriptor} made from bytes n * @see * #toByteArray() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 5e9abd31a7e8..2f4d6377888f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -221,7 +221,7 @@ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[ /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or @@ -234,37 +234,37 @@ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[ /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or * may not hold references to this region. - * @param regionid Region id to use. n + * @param regionId Region id to use. 
*/ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionid) throws IllegalArgumentException { - this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID); + final boolean split, final long regionId) throws IllegalArgumentException { + this(tableName, startKey, endKey, split, regionId, DEFAULT_REPLICA_ID); } /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or * may not hold references to this region. - * @param regionid Region id to use. - * @param replicaId the replicaId to use n + * @param regionId Region id to use. + * @param replicaId the replicaId to use */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionid, final int replicaId) throws IllegalArgumentException { + final boolean split, final long regionId, final int replicaId) throws IllegalArgumentException { super(); if (tableName == null) { throw new IllegalArgumentException("TableName cannot be null"); } this.tableName = tableName; this.offLine = false; - this.regionId = regionid; + this.regionId = regionId; this.replicaId = replicaId; if (this.replicaId > MAX_REPLICA_ID) { throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID); @@ -280,7 +280,7 @@ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[ } /** - * Costruct a copy of another HRegionInfo n + * Construct a copy of another HRegionInfo */ public HRegionInfo(RegionInfo other) { super(); @@ -303,8 +303,10 @@ public HRegionInfo(HRegionInfo other, int replicaId) { } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param regionId Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). * @return Region name made of passed tableName, startKey and id @@ -314,12 +316,14 @@ public HRegionInfo(HRegionInfo other, int replicaId) { @Deprecated @InterfaceAudience.Private public static byte[] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, boolean newFormat) { - return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + final long regionId, boolean newFormat) { + return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionId), newFormat); } /** - * Make a region name of passed parameters. n * @param startKey Can be null + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). @@ -335,10 +339,12 @@ public static byte[] createRegionName(final TableName tableName, final byte[] st } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). 
n * @param - * newFormat should we create the region name in the new format (such that it - * contains its encoded name?). + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param regionId Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. @@ -346,13 +352,15 @@ public static byte[] createRegionName(final TableName tableName, final byte[] st @Deprecated @InterfaceAudience.Private public static byte[] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, int replicaId, boolean newFormat) { - return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), + final long regionId, int replicaId, boolean newFormat) { + return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionId)), replicaId, newFormat); } /** - * Make a region name of passed parameters. n * @param startKey Can be null + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). @@ -368,9 +376,11 @@ public static byte[] createRegionName(final TableName tableName, final byte[] st } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). n * @param newFormat - * should we create the region name in the new format + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created) + * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. @@ -546,6 +556,7 @@ public boolean isSplit() { } /** + * Set or clear the split status flag. * @param split set split status */ public void setSplit(boolean split) { @@ -684,6 +695,7 @@ public static HRegionInfo convert(final HBaseProtos.RegionInfo proto) { } /** + * Serialize a {@link HRegionInfo} into a byte array. * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use @@ -695,6 +707,7 @@ public byte[] toByteArray() { } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes * null * @see #toByteArray() @@ -708,6 +721,7 @@ public static HRegionInfo parseFromOrNull(final byte[] bytes) { } /** + * Parse a serialized representation of a {@link HRegionInfo}. 
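A round-trip sketch for the pb serialization these deprecated methods describe (the table name and keys are illustrative only):

    HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo"),
        Bytes.toBytes("a"), Bytes.toBytes("z"));
    byte[] pb = hri.toByteArray();                      // pb magic prefix + RegionInfo proto
    HRegionInfo copy = HRegionInfo.parseFromOrNull(pb); // null if bytes fail to deserialize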
* @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes * null * @see #toByteArray() @@ -725,6 +739,7 @@ public static HRegionInfo parseFromOrNull(final byte[] bytes, int offset, int le } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @param bytes A pb RegionInfo serialized with a pb magic prefix. * @return A deserialized {@link HRegionInfo} n * @see #toByteArray() * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use @@ -736,6 +751,7 @@ public static HRegionInfo parseFrom(final byte[] bytes) throws DeserializationEx } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @param bytes A pb RegionInfo serialized with a pb magic prefix. * @param offset starting point in the byte array * @param len length to read on the byte array diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index 6ae93bb3954f..a7720b2734ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -110,8 +110,8 @@ public long getSeqNum() { } /** - * @return String made of hostname and port formatted as per - * {@link Addressing#createHostAndPortStr(String, int)} + * Returns String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 6c8ea810d03e..f5448e617372 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -801,6 +801,7 @@ public void removeCoprocessor(String className) { new HTableDescriptor(TableDescriptorBuilder.NAMESPACE_TABLEDESC); /** + * Set the table owner. * @deprecated since 0.94.1 * @see HBASE-6188 */ @@ -811,6 +812,7 @@ public HTableDescriptor setOwner(User owner) { } /** + * Set the table owner. * @deprecated since 0.94.1 * @see HBASE-6188 */ @@ -822,6 +824,7 @@ public HTableDescriptor setOwnerString(String ownerString) { } /** + * Get the table owner. 
* @deprecated since 0.94.1 * @see HBASE-6188 */ @@ -832,14 +835,14 @@ public String getOwnerString() { } /** - * @return This instance serialized with pb with pb magic prefix - * @see #parseFrom(byte[]) + * Returns This instance serialized with pb with pb magic prefix */ public byte[] toByteArray() { return TableDescriptorBuilder.toByteArray(delegatee); } /** + * Parse the serialized representation of a {@link HTableDescriptor} * @param bytes A pb serialized {@link HTableDescriptor} instance with pb magic prefix * @return An instance of {@link HTableDescriptor} made from bytes nn * @see * #toByteArray() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 01b5f49a2058..48476e4bb3af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -38,7 +38,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; @@ -155,6 +154,7 @@ public class MetaTableAccessor { private static final byte SEPARATED_BYTE = 0x00; @InterfaceAudience.Private + @SuppressWarnings("ImmutableEnumChecker") public enum QueryType { ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY), @@ -364,8 +364,8 @@ public static Result scanByRegionEncodedName(Connection connection, String regio } /** - * @return Return all regioninfos listed in the 'info:merge*' columns of the - * regionName row. + * Returns Return all regioninfos listed in the 'info:merge*' columns of the + * regionName row. */ @Nullable public static List getMergeRegions(Connection connection, byte[] regionName) @@ -381,8 +381,8 @@ public static boolean hasMergeRegions(Connection conn, byte[] regionName) throws } /** - * @return Deserialized values of <qualifier,regioninfo> pairs taken from column values that - * match the regex 'info:merge.*' in array of cells. + * Returns Deserialized values of <qualifier,regioninfo> pairs taken from column values that + * match the regex 'info:merge.*' in array of cells. */ @Nullable public static Map getMergeRegionsWithName(Cell[] cells) { @@ -408,8 +408,8 @@ public static Map getMergeRegionsWithName(Cell[] cells) { } /** - * @return Deserialized regioninfo values taken from column values that match the regex - * 'info:merge.*' in array of cells. + * Returns Deserialized regioninfo values taken from column values that match the regex + * 'info:merge.*' in array of cells. */ @Nullable public static List getMergeRegions(Cell[] cells) { @@ -418,8 +418,8 @@ public static List getMergeRegions(Cell[] cells) { } /** - * @return True if any merge regions present in cells; i.e. the column in - * cell matches the regex 'info:merge.*'. + * Returns True if any merge regions present in cells; i.e. the column in + * cell matches the regex 'info:merge.*'. 
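A usage sketch for the merge-column helpers above, assuming an open Connection named conn and a hbase:meta row key regionName (both hypothetical):

    // Merge parents live under 'info:merge*' qualifiers of the region's meta row.
    List<RegionInfo> parents = MetaTableAccessor.getMergeRegions(conn, regionName);
    if (parents != null) { // null when the row carries no merge columns
      for (RegionInfo parent : parents) {
        System.out.println("merge parent: " + parent.getRegionNameAsString());
      }
    }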
*/ public static boolean hasMergeRegions(Cell[] cells) { for (Cell cell : cells) { @@ -483,6 +483,7 @@ public static List getTableRegions(Connection connection, TableName return getListOfRegionInfos(result); } + @SuppressWarnings("MixedMutabilityReturnType") private static List getListOfRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) { @@ -496,8 +497,7 @@ public static List getTableRegions(Connection connection, TableName } /** - * @param tableName table we're working with - * @return start row for scanning META according to query type + * Returns start row for scanning META according to query type */ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) { if (tableName == null) { @@ -518,8 +518,7 @@ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type } /** - * @param tableName table we're working with - * @return stop row for scanning META according to query type + * Returns stop row for scanning META according to query type */ public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) { if (tableName == null) { @@ -641,9 +640,10 @@ void add(Result r) { } /** + * Get the user regions a given server is hosting. * @param connection connection we're using * @param serverName server whose regions we're interested in - * @return List of user regions installed on this server (does not include catalog regions). n + * @return List of user regions installed on this server (does not include catalog regions). */ public static NavigableMap getServerUserRegions(Connection connection, final ServerName serverName) throws IOException { @@ -1284,7 +1284,7 @@ public final boolean visit(Result rowResult) throws IOException { if (info == null) { return true; } - if (!(info.getTable().equals(tableName))) { + if (!info.getTable().equals(tableName)) { return false; } return super.visit(rowResult); @@ -1321,14 +1321,14 @@ private static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo spli if (splitA != null) { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(splitA)).build()); } if (splitB != null) { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(splitB)).build()); } return put; } @@ -1509,8 +1509,8 @@ static Put addMergeRegions(Put put, Collection mergeRegions) throws String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier)) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(ri)).build()); } return put; } @@ -1852,7 +1852,7 @@ public static void deleteMergeQualifiers(Connection connection, final RegionInfo public static Put 
addRegionInfo(final Put p, final RegionInfo hri) throws IOException { p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) .setFamily(getCatalogFamily()).setQualifier(HConstants.REGIONINFO_QUALIFIER) - .setTimestamp(p.getTimestamp()).setType(Type.Put) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) // Serialize the Default Replica HRI otherwise scan of hbase:meta // shows an info:regioninfo value with encoded name and region // name that differs from that of the hbase;meta row. @@ -1872,8 +1872,8 @@ public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replica .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getStartcode())).build()) .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) - .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)).build()); + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); } private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) { @@ -1922,7 +1922,7 @@ private static void addReplicationParent(Put put, List parents) thro byte[] value = getParentsBytes(parents); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(value).build()); } public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts) @@ -1938,7 +1938,7 @@ public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openS public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)) .build()); } @@ -1946,8 +1946,8 @@ public static Put addEmptyLocation(Put p, int replicaId) throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); return p .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) - .build()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()) .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Cell.Type.Put).build()) @@ -2096,7 +2096,7 @@ private static void debugLogMutation(Mutation p) throws IOException { private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException { return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)) 
.build()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 0a762bf78a6a..b45171e64956 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -354,6 +354,7 @@ public long getLastMajorCompactionTs() { } /** Returns the reference count for the stores of this region */ + @Override public int getStoreRefCount() { return metrics.getStoreRefCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 4d6dd6d43fa3..4c0390c6c3be 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -208,6 +208,7 @@ public RegionLocations removeElementsWithNullLocation() { * @param other the locations to merge with * @return an RegionLocations object with merged locations or the same object if nothing is merged */ + @SuppressWarnings("ReferenceEquality") public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -280,6 +281,7 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, * @return an RegionLocations object with updated locations or the same object if nothing is * updated */ + @SuppressWarnings("ReferenceEquality") public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, boolean force) { assert location != null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 645a31a85523..d915e7a32cac 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -107,8 +107,8 @@ default String getNameAsString() { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files of this - * region + * Returns the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 2320c8e908ed..714a14125538 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -79,16 +79,17 @@ public ServerLoad(ServerMetrics metrics) { for (RegionMetrics rl : metrics.getRegionMetrics().values()) { stores += rl.getStoreCount(); storefiles += rl.getStoreFileCount(); - storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storefileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE); - memstoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE); + storeUncompressedSizeMB += (int) rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storefileSizeMB += (int) rl.getStoreFileSize().get(Size.Unit.MEGABYTE); + memstoreSizeMB += (int) rl.getMemStoreSize().get(Size.Unit.MEGABYTE); readRequestsCount += rl.getReadRequestCount(); filteredReadRequestsCount += rl.getFilteredReadRequestCount(); writeRequestsCount += rl.getWriteRequestCount(); - storefileIndexSizeKB += rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE); - 
rootIndexSizeKB += rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); - totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); - totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); + storefileIndexSizeKB += (long) rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE); + rootIndexSizeKB += (int) rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + totalStaticIndexSizeKB += + (int) rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + totalStaticBloomSizeKB += (int) rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); totalCompactingKVs += rl.getCompactingCellCount(); currentCompactedKVs += rl.getCompactedCellCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 72f78220c1a8..1e57857db69e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -44,10 +44,6 @@ @InterfaceAudience.Private public final class ServerMetricsBuilder { - /** - * @param sn the server name - * @return a empty metrics - */ public static ServerMetrics of(ServerName sn) { return newBuilder(sn).build(); } @@ -280,6 +276,7 @@ public int getVersionNumber() { return versionNumber; } + @Override public String getVersion() { return version; } @@ -383,15 +380,17 @@ public String toString() { int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); - uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); - memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); - storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + uncompressedStoreFileSizeMB += + (long) r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storeFileSizeMB += (long) r.getStoreFileSize().get(Size.Unit.MEGABYTE); + memStoreSizeMB += (long) r.getMemStoreSize().get(Size.Unit.MEGABYTE); + storefileIndexSizeKB += + (long) r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); readRequestsCount += r.getReadRequestCount(); writeRequestsCount += r.getWriteRequestCount(); filteredReadRequestsCount += r.getFilteredReadRequestCount(); - rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); - bloomFilterSizeMB += r.getBloomFilterSize().get(Size.Unit.MEGABYTE); + rootLevelIndexSizeKB += (long) r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + bloomFilterSizeMB += (long) r.getBloomFilterSize().get(Size.Unit.MEGABYTE); compactedCellCount += r.getCompactedCellCount(); compactingCellCount += r.getCompactingCellCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 05108c70e746..681b1f416c78 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -50,8 +50,8 @@ interface ClientMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor service requests made by - * the user + * Returns the number of write requests and read requests and 
coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index ab63f19fec85..4a66283146d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; import org.apache.hadoop.hbase.util.Strings; @@ -30,7 +31,8 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { - UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); + UserMetricsBuilder builder = + UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes(StandardCharsets.UTF_8)); userLoad.getClientMetricsList().stream() .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index bb44defbac6a..b0a33eda4021 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -27,11 +27,9 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE(0), - MULTI(1); + SINGLE, + MULTI; - ResponseType(int value) { - } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index bc1febe38031..4e97dcab24dd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -135,7 +135,7 @@ private void populateStubs(Set addrs) throws IOException { * Typically, you can use lambda expression to implement this interface as * *
-   * (c, s, d) -> s.xxx(c, your request here, d)
+   * (c, s, d) -&gt; s.xxx(c, your request here, d)
    * 
*/ @FunctionalInterface diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 6a0913c23c73..a2f34ebbd8e3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -116,6 +116,7 @@ public interface Admin extends Abortable, Closeable { Connection getConnection(); /** + * Check if a table exists. * @param tableName Table to check. * @return true if table exists already. * @throws IOException if a remote or network exception occurs @@ -267,8 +268,8 @@ default TableName[] listTableNames(Pattern pattern) throws IOException { * Get a table descriptor. * @param tableName as a {@link TableName} * @return the read-only tableDescriptor - * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws TableNotFoundException if the table was not found + * @throws IOException if a remote or network exception occurs * @deprecated since 2.0 version and will be removed in 3.0 version. Use * {@link #getDescriptor(TableName)}. */ @@ -280,8 +281,8 @@ HTableDescriptor getTableDescriptor(TableName tableName) * Get a table descriptor. * @param tableName as a {@link TableName} * @return the tableDescriptor - * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws TableNotFoundException if the table was not found + * @throws IOException if a remote or network exception occurs */ TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; @@ -290,7 +291,7 @@ HTableDescriptor getTableDescriptor(TableName tableName) * @param desc table descriptor for table * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -316,7 +317,7 @@ default void createTable(TableDescriptor desc) throws IOException { * @throws IOException if a remote or network exception * occurs * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -335,7 +336,7 @@ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRe * split keys are repeated and if the * split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -574,6 +575,7 @@ default void disableTable(TableName tableName) throws IOException { HTableDescriptor[] disableTables(Pattern pattern) throws IOException; /** + * Check if a table is enabled. 
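A guard sketch combining the existence and state checks this interface documents, assuming an Admin named admin and a TableName tn (both hypothetical):

    if (admin.tableExists(tn) && admin.isTableEnabled(tn)) {
      admin.disableTable(tn); // IOException on a remote or network failure
    }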
* @param tableName name of table to check * @return true if table is on-line * @throws IOException if a remote or network exception occurs @@ -581,6 +583,7 @@ default void disableTable(TableName tableName) throws IOException { boolean isTableEnabled(TableName tableName) throws IOException; /** + * Check if a table is disabled. * @param tableName name of table to check * @return true if table is off-line * @throws IOException if a remote or network exception occurs @@ -588,6 +591,7 @@ default void disableTable(TableName tableName) throws IOException { boolean isTableDisabled(TableName tableName) throws IOException; /** + * Check if a table is available. * @param tableName name of table to check * @return true if all regions of the table are available * @throws IOException if a remote or network exception occurs @@ -1646,6 +1650,7 @@ default ClusterMetrics getClusterMetrics() throws IOException { ClusterMetrics getClusterMetrics(EnumSet