diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 155f0249daa2..40db1c1ac044 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -856,10 +856,24 @@ void unassign(byte[] regionName, boolean force)
* the request was submitted successfully. We need to check logs for the details of which regions
* were split/merged.
*
- * @return true if region normalizer ran, false otherwise.
+ * @return {@code true} if region normalizer ran, {@code false} otherwise.
* @throws IOException if a remote or network exception occurs
*/
- boolean normalize() throws IOException;
+ default boolean normalize() throws IOException {
+ return normalize(new NormalizeTableFilterParams.Builder().build());
+ }
+
+ /**
+ * Invoke region normalizer. Can NOT run for various reasons. Check logs.
+ * This is a non-blocking invocation to region normalizer. If return value is true, it means
+ * the request was submitted successfully. We need to check logs for the details of which regions
+ * were split/merged.
+ *
+ * @param ntfp limit to tables matching the specified filter.
+ * @return {@code true} if region normalizer ran, {@code false} otherwise.
+ * @throws IOException if a remote or network exception occurs
+ */
+ boolean normalize(NormalizeTableFilterParams ntfp) throws IOException;
/**
* Query the current state of the region normalizer.
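For readers of this patch, a minimal, hypothetical usage sketch of the Admin API as changed above. The connection setup and table names are illustrative only and are not part of the diff; only `normalize()` and `normalize(NormalizeTableFilterParams)` come from the patch.

```java
// Illustrative only: exercises the Admin#normalize overloads introduced above.
import java.util.Arrays;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;

public class NormalizeExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // No-arg form: delegates to normalize(new NormalizeTableFilterParams.Builder().build()),
      // i.e. no table filter, preserving the old behavior.
      boolean submitted = admin.normalize();

      // Filtered form: restrict normalization to the named tables (names are hypothetical).
      NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
          .tableNames(Arrays.asList(TableName.valueOf("t1"), TableName.valueOf("t2")))
          .build();
      boolean filteredSubmitted = admin.normalize(ntfp);
      System.out.println("submitted=" + submitted + ", filteredSubmitted=" + filteredSubmitted);
    }
  }
}
```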
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
index 1255753f05e0..1b7a24bb36a4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdminOverAsyncAdmin.java
@@ -395,8 +395,8 @@ public CacheEvictionStats clearBlockCache(TableName tableName) throws IOExceptio
}
@Override
- public boolean normalize() throws IOException {
- return get(admin.normalize());
+ public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException {
+ return get(admin.normalize(ntfp));
}
@Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
index 336903d42e38..8c877e9c943a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -1279,7 +1279,17 @@ default CompletableFuture<Boolean> balance() {
* @return true if region normalizer ran, false otherwise. The return value will be wrapped by a
* {@link CompletableFuture}
*/
- CompletableFuture<Boolean> normalize();
+ default CompletableFuture<Boolean> normalize() {
+ return normalize(new NormalizeTableFilterParams.Builder().build());
+ }
+
+ /**
+ * Invoke region normalizer. Can NOT run for various reasons. Check logs.
+ * @param ntfp limit to tables matching the specified filter.
+ * @return true if region normalizer ran, false otherwise. The return value will be wrapped by a
+ * {@link CompletableFuture}
+ */
+ CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp);
/**
* Turn the cleaner chore on/off.
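A similarly hedged sketch for the asynchronous side; how the returned `CompletableFuture<Boolean>` is consumed here is just one option, and the helper method is hypothetical.

```java
// Illustrative only: the AsyncAdmin#normalize(NormalizeTableFilterParams) overload added above.
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;

final class AsyncNormalizeExample {
  // Caller supplies an AsyncAdmin obtained from an AsyncConnection.
  static CompletableFuture<Void> normalizeNamespace(AsyncAdmin admin, String namespace) {
    NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
        .namespace(namespace)  // limit the run to one namespace
        .build();
    return admin.normalize(ntfp)
        // true only means the request was accepted; check master logs for the actual plans.
        .thenAccept(ran -> System.out.println("normalizer ran: " + ran));
  }
}
```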
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
index c29fe7118dd7..2301d4a811c8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -709,8 +709,8 @@ public CompletableFuture<Boolean> isNormalizerEnabled() {
}
@Override
- public CompletableFuture<Boolean> normalize() {
- return wrap(rawAdmin.normalize());
+ public CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp) {
+ return wrap(rawAdmin.normalize(ntfp));
}
@Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java
new file mode 100644
index 000000000000..982ec5b0065b
--- /dev/null
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.client;
+
+import java.util.List;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.yetus.audience.InterfaceAudience;
+
+/**
+ * A collection of criteria used for table selection. The logic of table selection is as follows:
+ * <ul>
+ * <li>
+ * When no parameter values are provided, an unfiltered list of all user tables is returned.
+ * </li>
+ * <li>
+ * When a list of {@link TableName TableNames} are provided, the filter starts with any of
+ * these tables that exist.
+ * </li>
+ * <li>
+ * When a {@code namespace} name is provided, the filter starts with all the tables present in
+ * that namespace.
+ * </li>
+ * <li>
+ * If both a list of {@link TableName TableNames} and a {@code namespace} name are provided,
+ * the {@link TableName} list is honored and the {@code namespace} name is ignored.
+ * </li>
+ * <li>
+ * If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
+ * reduced to those that match the provided regular expression.
+ * </li>
+ * </ul>
+ */
+@InterfaceAudience.Public
+public final class NormalizeTableFilterParams {
+ private final List<TableName> tableNames;
+ private final String regex;
+ private final String namespace;
+
+ private NormalizeTableFilterParams(final List<TableName> tableNames, final String regex,
+ final String namespace) {
+ this.tableNames = tableNames;
+ this.regex = regex;
+ this.namespace = namespace;
+ }
+
+ public List<TableName> getTableNames() {
+ return tableNames;
+ }
+
+ public String getRegex() {
+ return regex;
+ }
+
+ public String getNamespace() {
+ return namespace;
+ }
+
+ /**
+ * Used to instantiate an instance of {@link NormalizeTableFilterParams}.
+ */
+ public static class Builder {
+ private List<TableName> tableNames;
+ private String regex;
+ private String namespace;
+
+ public Builder tableFilterParams(final NormalizeTableFilterParams ntfp) {
+ this.tableNames = ntfp.getTableNames();
+ this.regex = ntfp.getRegex();
+ this.namespace = ntfp.getNamespace();
+ return this;
+ }
+
+ public Builder tableNames(final List<TableName> tableNames) {
+ this.tableNames = tableNames;
+ return this;
+ }
+
+ public Builder regex(final String regex) {
+ this.regex = regex;
+ return this;
+ }
+
+ public Builder namespace(final String namespace) {
+ this.namespace = namespace;
+ return this;
+ }
+
+ public NormalizeTableFilterParams build() {
+ return new NormalizeTableFilterParams(tableNames, regex, namespace);
+ }
+ }
+}
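A short sketch of how the new builder is expected to compose and copy criteria, based solely on the class above; the table and namespace names are made up.

```java
// Illustrative only: composing and copying NormalizeTableFilterParams via its Builder.
import java.util.Collections;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;

final class FilterParamsExample {
  static NormalizeTableFilterParams examples() {
    // Namespace plus regex; unset fields stay null and mean "no constraint".
    NormalizeTableFilterParams nsAndRegex = new NormalizeTableFilterParams.Builder()
        .namespace("ns1")
        .regex(".*_small")
        .build();

    // tableFilterParams(...) seeds a new Builder from an existing instance, so individual
    // fields can be overridden without restating the rest. Per the class javadoc, a
    // TableName list takes precedence over the copied namespace.
    return new NormalizeTableFilterParams.Builder()
        .tableFilterParams(nsAndRegex)
        .tableNames(Collections.singletonList(TableName.valueOf("ns1:events")))
        .build();
  }
}
```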
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
index 1330b2a17562..d740a3a26b02 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java
@@ -3286,14 +3286,18 @@ public CompletableFuture<Boolean> isNormalizerEnabled() {
}
@Override
- public CompletableFuture<Boolean> normalize() {
+ public CompletableFuture<Boolean> normalize(NormalizeTableFilterParams ntfp) {
+ return normalize(RequestConverter.buildNormalizeRequest(ntfp));
+ }
+
+ private CompletableFuture<Boolean> normalize(NormalizeRequest request) {
return this
- .<Boolean> newMasterCaller()
- .action(
- (controller, stub) -> this.<NormalizeRequest, NormalizeResponse, Boolean> call(
- controller, stub, RequestConverter.buildNormalizeRequest(),
- (s, c, req, done) -> s.normalize(c, req, done), (resp) -> resp.getNormalizerRan()))
- .call();
+ .<Boolean> newMasterCaller()
+ .action(
+ (controller, stub) -> this.call(
+ controller, stub, request, MasterService.Interface::normalize,
+ NormalizeResponse::getNormalizerRan))
+ .call();
}
@Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
index 4a6adb180bc1..ff202913b042 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java
@@ -2293,6 +2293,13 @@ public static HBaseProtos.TableName toProtoTableName(TableName tableName) {
.setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())).build();
}
+ public static List<HBaseProtos.TableName> toProtoTableNameList(List<TableName> tableNameList) {
+ if (tableNameList == null) {
+ return new ArrayList<>();
+ }
+ return tableNameList.stream().map(ProtobufUtil::toProtoTableName).collect(Collectors.toList());
+ }
+
public static List<TableName> toTableNameList(List<HBaseProtos.TableName> tableNamesList) {
if (tableNamesList == null) {
return new ArrayList<>();
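A small, hypothetical sanity-check of the null-tolerant helper added above, round-tripping through the pre-existing `toTableNameList`.

```java
// Illustrative only: round-trips a client TableName list through the new helper and back.
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

final class TableNameConversionExample {
  static List<TableName> roundTrip() {
    List<TableName> names = Arrays.asList(TableName.valueOf("ns1:foo"), TableName.valueOf("bar"));
    List<HBaseProtos.TableName> proto = ProtobufUtil.toProtoTableNameList(names);
    // A null input yields an empty list rather than an NPE, which the normalizer path relies on.
    assert ProtobufUtil.toProtoTableNameList(null).isEmpty();
    return ProtobufUtil.toTableNameList(proto);
  }
}
```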
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
index 7b0282afc080..1352b7714bc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -46,6 +46,7 @@
import org.apache.hadoop.hbase.client.LogQueryFilter;
import org.apache.hadoop.hbase.client.MasterSwitchType;
import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec;
import org.apache.hadoop.hbase.client.RegionInfo;
@@ -1476,8 +1477,18 @@ public static RegionOpenInfo buildRegionOpenInfo(RegionInfo region, List
- final List<TableName> allEnabledTables =
- new ArrayList<>(tableStateManager.getTablesInStates(TableState.State.ENABLED));
- Collections.shuffle(allEnabledTables);
+ final Set<TableName> matchingTables = getTableDescriptors(new LinkedList<>(),
+ ntfp.getNamespace(), ntfp.getRegex(), ntfp.getTableNames(), false)
+ .stream()
+ .map(TableDescriptor::getTableName)
+ .collect(Collectors.toSet());
+ final Set<TableName> allEnabledTables =
+ tableStateManager.getTablesInStates(TableState.State.ENABLED);
+ final List<TableName> targetTables =
+ new ArrayList<>(Sets.intersection(matchingTables, allEnabledTables));
+ Collections.shuffle(targetTables);
final List<Long> submittedPlanProcIds = new ArrayList<>();
- for (TableName table : allEnabledTables) {
+ for (TableName table : targetTables) {
if (table.isSystemTable()) {
continue;
}
@@ -3399,9 +3409,9 @@ public List<TableName> listTableNames(final String namespace, final String regex
}
/**
- * @return list of table table descriptors after filtering by regex and whether to include system
- * tables, etc.
- * @throws IOException
+ * Return a list of table descriptors after applying any provided filter parameters. Note
+ * that the user-facing description of this filter logic is presented on the class-level javadoc
+ * of {@link NormalizeTableFilterParams}.
*/
private List<TableDescriptor> getTableDescriptors(final List<TableDescriptor> htds,
final String namespace, final String regex, final List<TableName> tableNameList,
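The target-table selection introduced above can be summarized outside of HMaster as follows. This is a standalone sketch, equivalent in spirit to the patch's `Sets.intersection` + shuffle + system-table skip, not the HMaster code itself.

```java
// Standalone sketch of the selection logic (not HMaster code): intersect the filter's matches
// with ENABLED tables, shuffle the result, and drop system tables.
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.hbase.TableName;

final class TargetTableSelection {
  static List<TableName> select(Set<TableName> matchingTables, Set<TableName> enabledTables) {
    // Equivalent to Sets.intersection(matchingTables, enabledTables) in the patch.
    Set<TableName> intersection = new HashSet<>(matchingTables);
    intersection.retainAll(enabledTables);

    List<TableName> targets = new ArrayList<>(intersection);
    Collections.shuffle(targets);                // randomize normalization order
    targets.removeIf(TableName::isSystemTable);  // the loop skips system tables
    return targets;
  }
}
```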
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
index c470acd60356..72040ae0b958 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/MasterRpcServices.java
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.UnknownRegionException;
import org.apache.hadoop.hbase.client.MasterSwitchType;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
@@ -1920,7 +1921,14 @@ public NormalizeResponse normalize(RpcController controller,
NormalizeRequest request) throws ServiceException {
rpcPreCheck("normalize");
try {
- return NormalizeResponse.newBuilder().setNormalizerRan(master.normalizeRegions()).build();
+ final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
+ .tableNames(ProtobufUtil.toTableNameList(request.getTableNamesList()))
+ .regex(request.hasRegex() ? request.getRegex() : null)
+ .namespace(request.hasNamespace() ? request.getNamespace() : null)
+ .build();
+ return NormalizeResponse.newBuilder()
+ .setNormalizerRan(master.normalizeRegions(ntfp))
+ .build();
} catch (IOException ex) {
throw new ServiceException(ex);
}
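The client-side `RequestConverter.buildNormalizeRequest(NormalizeTableFilterParams)` hunk is not reproduced in this excerpt. Based only on the accessors MasterRpcServices reads above (`getTableNamesList`, `hasRegex`/`getRegex`, `hasNamespace`/`getNamespace`), a plausible mapping could look like the following sketch; it is an assumption, not the patch's actual converter code.

```java
// Hedged sketch only: one plausible client-side mapping from NormalizeTableFilterParams onto the
// NormalizeRequest fields read by MasterRpcServices above. Not the patch's RequestConverter code.
import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.NormalizeRequest;

final class NormalizeRequestSketch {
  static NormalizeRequest build(NormalizeTableFilterParams ntfp) {
    NormalizeRequest.Builder builder = NormalizeRequest.newBuilder();
    // toProtoTableNameList(...) (added in ProtobufUtil above) tolerates a null list.
    builder.addAllTableNames(ProtobufUtil.toProtoTableNameList(ntfp.getTableNames()));
    if (ntfp.getRegex() != null) {
      builder.setRegex(ntfp.getRegex());
    }
    if (ntfp.getNamespace() != null) {
      builder.setNamespace(ntfp.getNamespace());
    }
    return builder.build();
  }
}
```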
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
index da4c52ea8bd5..ee9a160182da 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/normalizer/TestSimpleRegionNormalizerOnCluster.java
@@ -22,6 +22,7 @@
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
+import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.TimeUnit;
@@ -35,7 +36,8 @@
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter.ExplainingPredicate;
-import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.AsyncAdmin;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Table;
@@ -79,7 +81,7 @@ public class TestSimpleRegionNormalizerOnCluster {
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static final byte[] FAMILY_NAME = Bytes.toBytes("fam");
- private static Admin admin;
+ private static AsyncAdmin admin;
private static HMaster master;
@Rule
@@ -94,9 +96,12 @@ public static void beforeAllTests() throws Exception {
// no way for the test to set the regionId on a created region, so disable this feature.
TEST_UTIL.getConfiguration().setInt("hbase.normalizer.merge.min_region_age.days", 0);
+ // disable the normalizer coming along and running via Chore
+ TEST_UTIL.getConfiguration().setInt("hbase.normalizer.period", Integer.MAX_VALUE);
+
TEST_UTIL.startMiniCluster(1);
TestNamespaceAuditor.waitForQuotaInitialize(TEST_UTIL);
- admin = TEST_UTIL.getAdmin();
+ admin = TEST_UTIL.getAsyncConnection().getAdmin();
master = TEST_UTIL.getHBaseCluster().getMaster();
assertNotNull(master);
}
@@ -107,17 +112,17 @@ public static void afterAllTests() throws Exception {
}
@Before
- public void before() throws IOException {
+ public void before() throws Exception {
// disable the normalizer ahead of time, let the test enable it when its ready.
- admin.normalizerSwitch(false);
+ admin.normalizerSwitch(false).get();
}
@Test
- public void testHonorsNormalizerSwitch() throws IOException {
- assertFalse(admin.isNormalizerEnabled());
- assertFalse(admin.normalize());
- assertFalse(admin.normalizerSwitch(true));
- assertTrue(admin.normalize());
+ public void testHonorsNormalizerSwitch() throws Exception {
+ assertFalse(admin.isNormalizerEnabled().get());
+ assertFalse(admin.normalize().get());
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize().get());
}
/**
@@ -137,8 +142,8 @@ public void testHonorsNormalizerTableSetting() throws Exception {
final int tn2RegionCount = createTableBegsSplit(tn2, false, false);
final int tn3RegionCount = createTableBegsSplit(tn3, true, true);
- assertFalse(admin.normalizerSwitch(true));
- assertTrue(admin.normalize());
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize().get());
waitForTableSplit(tn1, tn1RegionCount + 1);
// confirm that tn1 has (tn1RegionCount + 1) number of regions.
@@ -183,8 +188,8 @@ void testRegionNormalizationSplit(boolean limitedByQuota) throws Exception {
final int currentRegionCount = createTableBegsSplit(tableName, true, false);
final long existingSkippedSplitCount = master.getRegionNormalizer()
.getSkippedCount(PlanType.SPLIT);
- assertFalse(admin.normalizerSwitch(true));
- assertTrue(admin.normalize());
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize().get());
if (limitedByQuota) {
waitForSkippedSplits(master, existingSkippedSplitCount);
assertEquals(
@@ -208,8 +213,8 @@ public void testRegionNormalizationMerge() throws Exception {
final TableName tableName = TableName.valueOf(name.getMethodName());
try {
final int currentRegionCount = createTableBegsMerge(tableName);
- assertFalse(admin.normalizerSwitch(true));
- assertTrue(admin.normalize());
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize().get());
waitForTableMerge(tableName, currentRegionCount - 1);
assertEquals(
tableName + " should have merged.",
@@ -220,14 +225,103 @@ public void testRegionNormalizationMerge() throws Exception {
}
}
- private static TableName buildTableNameForQuotaTest(final String methodName) throws IOException {
+ @Test
+ public void testHonorsNamespaceFilter() throws Exception {
+ final NamespaceDescriptor namespaceDescriptor = NamespaceDescriptor.create("ns").build();
+ final TableName tn1 = TableName.valueOf("ns", name.getMethodName());
+ final TableName tn2 = TableName.valueOf(name.getMethodName());
+
+ try {
+ admin.createNamespace(namespaceDescriptor).get();
+ final int tn1RegionCount = createTableBegsSplit(tn1, true, false);
+ final int tn2RegionCount = createTableBegsSplit(tn2, true, false);
+ final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
+ .namespace("ns")
+ .build();
+
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize(ntfp).get());
+ waitForTableSplit(tn1, tn1RegionCount + 1);
+
+ // confirm that tn1 has (tn1RegionCount + 1) number of regions.
+ // tn2 has tn2RegionCount number of regions because it's not a member of the target namespace.
+ assertEquals(
+ tn1 + " should have split.",
+ tn1RegionCount + 1,
+ MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1));
+ waitForTableRegionCount(tn2, tn2RegionCount);
+ } finally {
+ dropIfExists(tn1);
+ dropIfExists(tn2);
+ }
+ }
+
+ @Test
+ public void testHonorsPatternFilter() throws Exception {
+ final TableName tn1 = TableName.valueOf(name.getMethodName() + "1");
+ final TableName tn2 = TableName.valueOf(name.getMethodName() + "2");
+
+ try {
+ final int tn1RegionCount = createTableBegsSplit(tn1, true, false);
+ final int tn2RegionCount = createTableBegsSplit(tn2, true, false);
+ final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
+ .regex(".*[1]")
+ .build();
+
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize(ntfp).get());
+ waitForTableSplit(tn1, tn1RegionCount + 1);
+
+ // confirm that tn1 has (tn1RegionCount + 1) number of regions.
+ // tn2 has tn2RegionCount number of regions because it fails filter.
+ assertEquals(
+ tn1 + " should have split.",
+ tn1RegionCount + 1,
+ MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1));
+ waitForTableRegionCount(tn2, tn2RegionCount);
+ } finally {
+ dropIfExists(tn1);
+ dropIfExists(tn2);
+ }
+ }
+
+ @Test
+ public void testHonorsNameFilter() throws Exception {
+ final TableName tn1 = TableName.valueOf(name.getMethodName() + "1");
+ final TableName tn2 = TableName.valueOf(name.getMethodName() + "2");
+
+ try {
+ final int tn1RegionCount = createTableBegsSplit(tn1, true, false);
+ final int tn2RegionCount = createTableBegsSplit(tn2, true, false);
+ final NormalizeTableFilterParams ntfp = new NormalizeTableFilterParams.Builder()
+ .tableNames(Collections.singletonList(tn1))
+ .build();
+
+ assertFalse(admin.normalizerSwitch(true).get());
+ assertTrue(admin.normalize(ntfp).get());
+ waitForTableSplit(tn1, tn1RegionCount + 1);
+
+ // confirm that tn1 has (tn1RegionCount + 1) number of regions.
+ // tn2 has tn2RegionCount number of regions because it fails filter.
+ assertEquals(
+ tn1 + " should have split.",
+ tn1RegionCount + 1,
+ MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tn1));
+ waitForTableRegionCount(tn2, tn2RegionCount);
+ } finally {
+ dropIfExists(tn1);
+ dropIfExists(tn2);
+ }
+ }
+
+ private static TableName buildTableNameForQuotaTest(final String methodName) throws Exception {
String nsp = "np2";
NamespaceDescriptor nspDesc =
NamespaceDescriptor.create(nsp)
.addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5")
.addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build();
- admin.createNamespace(nspDesc);
- return TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + methodName);
+ admin.createNamespace(nspDesc).get();
+ return TableName.valueOf(nsp, methodName);
}
private static void waitForSkippedSplits(final HMaster master,
@@ -347,16 +441,17 @@ private static double getRegionSizeMB(final MasterServices masterServices,
*/
private static int createTableBegsSplit(final TableName tableName,
final boolean normalizerEnabled, final boolean isMergeEnabled)
- throws IOException {
+ throws Exception {
final List<HRegion> generatedRegions = generateTestData(tableName, 1, 1, 2, 3, 5);
assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
- admin.flush(tableName);
+ admin.flush(tableName).get();
- final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
+ final TableDescriptor td = TableDescriptorBuilder
+ .newBuilder(admin.getDescriptor(tableName).get())
.setNormalizationEnabled(normalizerEnabled)
.setMergeEnabled(isMergeEnabled)
.build();
- admin.modifyTable(td);
+ admin.modifyTable(td).get();
// make sure relatively accurate region statistics are available for the test table. use
// the last/largest region as clue.
@@ -383,16 +478,17 @@ private static int createTableBegsSplit(final TableName tableName,
* sum of sizes of first two regions < average
*
*/
- private static int createTableBegsMerge(final TableName tableName) throws IOException {
+ private static int createTableBegsMerge(final TableName tableName) throws Exception {
// create 5 regions with sizes to trigger merge of small regions
final List<HRegion> generatedRegions = generateTestData(tableName, 1, 1, 3, 3, 5);
assertEquals(5, MetaTableAccessor.getRegionCount(TEST_UTIL.getConnection(), tableName));
- admin.flush(tableName);
+ admin.flush(tableName).get();
- final TableDescriptor td = TableDescriptorBuilder.newBuilder(admin.getDescriptor(tableName))
+ final TableDescriptor td = TableDescriptorBuilder
+ .newBuilder(admin.getDescriptor(tableName).get())
.setNormalizationEnabled(true)
.build();
- admin.modifyTable(td);
+ admin.modifyTable(td).get();
// make sure relatively accurate region statistics are available for the test table. use
// the last/largest region as clue.
@@ -411,12 +507,12 @@ private static int createTableBegsMerge(final TableName tableName) throws IOExce
return 5;
}
- private static void dropIfExists(final TableName tableName) throws IOException {
- if (tableName != null && admin.tableExists(tableName)) {
- if (admin.isTableEnabled(tableName)) {
- admin.disableTable(tableName);
+ private static void dropIfExists(final TableName tableName) throws Exception {
+ if (tableName != null && admin.tableExists(tableName).get()) {
+ if (admin.isTableEnabled(tableName).get()) {
+ admin.disableTable(tableName).get();
}
- admin.deleteTable(tableName);
+ admin.deleteTable(tableName).get();
}
}
}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
index aad0d416d265..16aa12f601a1 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/rsgroup/VerifyingRSGroupAdmin.java
@@ -1,4 +1,4 @@
-/**
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -48,6 +48,7 @@
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
@@ -341,8 +342,9 @@ public CacheEvictionStats clearBlockCache(TableName tableName) throws IOExceptio
return admin.clearBlockCache(tableName);
}
- public boolean normalize() throws IOException {
- return admin.normalize();
+ @Override
+ public boolean normalize(NormalizeTableFilterParams ntfp) throws IOException {
+ return admin.normalize(ntfp);
}
public boolean isNormalizerEnabled() throws IOException {
diff --git a/hbase-shell/src/main/ruby/hbase/admin.rb b/hbase-shell/src/main/ruby/hbase/admin.rb
index f298a123dbcb..5392cdf8b8e8 100644
--- a/hbase-shell/src/main/ruby/hbase/admin.rb
+++ b/hbase-shell/src/main/ruby/hbase/admin.rb
@@ -258,9 +258,54 @@ def clear_block_cache(table_name)
#----------------------------------------------------------------------------------------------
# Requests region normalization for all configured tables in the cluster
- # Returns true if normalizer ran successfully
- def normalize
- @admin.normalize
+ # Returns true if normalize request was successfully submitted
+ def normalize(*args)
+ builder = org.apache.hadoop.hbase.client.NormalizeTableFilterParams::Builder.new
+ args.each do |arg|
+ unless arg.is_a?(String) || arg.is_a?(Hash)
+ raise(ArgumentError, "#{arg.class} of #{arg.inspect} is not of Hash or String type")
+ end
+
+ if arg.key?(TABLE_NAME)
+ table_name = arg.delete(TABLE_NAME)
+ unless table_name.is_a?(String)
+ raise(ArgumentError, "#{TABLE_NAME} must be of type String")
+ end
+
+ builder.tableNames(java.util.Collections.singletonList(TableName.valueOf(table_name)))
+ elsif arg.key?(TABLE_NAMES)
+ table_names = arg.delete(TABLE_NAMES)
+ unless table_names.is_a?(Array)
+ raise(ArgumentError, "#{TABLE_NAMES} must be of type Array")
+ end
+
+ table_name_list = java.util.LinkedList.new
+ table_names.each do |tn|
+ unless tn.is_a?(String)
+ raise(ArgumentError, "#{TABLE_NAMES} value #{tn} must be of type String")
+ end
+
+ table_name_list.add(TableName.valueOf(tn))
+ end
+ builder.tableNames(table_name_list)
+ elsif arg.key?(REGEX)
+ regex = arg.delete(REGEX)
+ raise(ArgumentError, "#{REGEX} must be of type String") unless regex.is_a?(String)
+
+ builder.regex(regex)
+ elsif arg.key?(NAMESPACE)
+ namespace = arg.delete(NAMESPACE)
+ unless namespace.is_a?(String)
+ raise(ArgumentError, "#{NAMESPACE} must be of type String")
+ end
+
+ builder.namespace(namespace)
+ else
+ raise(ArgumentError, "Unrecognized argument #{arg}")
+ end
+ end
+ ntfp = builder.build
+ @admin.normalize(ntfp)
end
#----------------------------------------------------------------------------------------------
diff --git a/hbase-shell/src/main/ruby/hbase_constants.rb b/hbase-shell/src/main/ruby/hbase_constants.rb
index 6db91c7cd02f..b1b0eaee6678 100644
--- a/hbase-shell/src/main/ruby/hbase_constants.rb
+++ b/hbase-shell/src/main/ruby/hbase_constants.rb
@@ -71,6 +71,7 @@ module HBaseConstants
POLICY = 'POLICY'.freeze
RAW = 'RAW'.freeze
READ_TYPE = 'READ_TYPE'.freeze
+ REGEX = 'REGEX'.freeze
REGIONSERVER = 'REGIONSERVER'.freeze
REGION_REPLICATION = 'REGION_REPLICATION'.freeze
REGION_REPLICA_ID = 'REGION_REPLICA_ID'.freeze
@@ -91,6 +92,8 @@ module HBaseConstants
STOPROW = 'STOPROW'.freeze
TABLE = 'TABLE'.freeze
TABLE_CFS = 'TABLE_CFS'.freeze
+ TABLE_NAME = 'TABLE_NAME'.freeze
+ TABLE_NAMES = 'TABLE_NAMES'.freeze
TIMERANGE = 'TIMERANGE'.freeze
TIMESTAMP = 'TIMESTAMP'.freeze
TYPE = 'TYPE'.freeze
diff --git a/hbase-shell/src/main/ruby/shell/commands/normalize.rb b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
index 2840e845bd6f..70e524ae11c8 100644
--- a/hbase-shell/src/main/ruby/shell/commands/normalize.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/normalize.rb
@@ -22,18 +22,24 @@ module Commands
class Normalize < Command
def help
<<-EOF
-Trigger region normalizer for all tables which have NORMALIZATION_ENABLED flag set. Returns true
- if normalizer ran successfully, false otherwise. Note that this command has no effect
- if region normalizer is disabled (make sure it's turned on using 'normalizer_switch' command).
+Trigger the region normalizer. Without arguments, invokes the normalizer without a table filter.
+Any arguments are used to limit table selection. Returns true if the normalize request was
+submitted successfully, false otherwise. Note that this command has no effect if region normalizer
+is disabled (make sure it's turned on using 'normalizer_switch' command).
- Examples:
+Examples:
- hbase> normalize
+ hbase> normalize
+ hbase> normalize TABLE_NAME => 'my_table'
+ hbase> normalize TABLE_NAMES => ['foo', 'bar', 'baz']
+ hbase> normalize REGEX => 'my_.*'
+ hbase> normalize NAMESPACE => 'ns1'
+ hbase> normalize NAMESPACE => 'ns', REGEX => '.*_BIG_.*'
EOF
end
- def command
- did_normalize_run = !!admin.normalize
+ def command(*args)
+ did_normalize_run = !!admin.normalize(*args)
formatter.row([did_normalize_run.to_s])
did_normalize_run
end
diff --git a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
index 9758d087b709..633238bc870b 100644
--- a/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
+++ b/hbase-thrift/src/main/java/org/apache/hadoop/hbase/thrift2/client/ThriftAdmin.java
@@ -1,5 +1,4 @@
-/**
- *
+/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@@ -44,6 +43,7 @@
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.LogQueryFilter;
+import org.apache.hadoop.hbase.client.NormalizeTableFilterParams;
import org.apache.hadoop.hbase.client.OnlineLogRecord;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.SnapshotDescription;
@@ -639,7 +639,7 @@ public CacheEvictionStats clearBlockCache(TableName tableName) {
}
@Override
- public boolean normalize() {
+ public boolean normalize(NormalizeTableFilterParams ntfp) {
throw new NotImplementedException("normalize not supported in ThriftAdmin");
}