Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -321,6 +321,7 @@ private void addRegionToMap(Map<ServerName, List<RegionInfo>> assignmentMapForFa
regionsOnServer.add(region);
}

/**
 * Returns the favored nodes recorded for the given region.
 * <p/>
 * Delegates to the favored nodes manager ({@code fnm}).
 */
@Override
public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
  return this.fnm.getFavoredNodes(regionInfo);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -35,4 +35,6 @@ void generateFavoredNodesForDaughter(List<ServerName> servers,

void generateFavoredNodesForMergedRegion(RegionInfo merged, RegionInfo [] mergeParents)
throws IOException;

/** Returns the favored nodes currently recorded for the given region. */
List<ServerName> getFavoredNodes(RegionInfo regionInfo);
}
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,6 @@
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.ExecutorType;
import org.apache.hadoop.hbase.favored.FavoredNodesManager;
import org.apache.hadoop.hbase.favored.FavoredNodesPromoter;
import org.apache.hadoop.hbase.http.InfoServer;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils;
import org.apache.hadoop.hbase.ipc.RpcServer;
Expand All @@ -106,7 +105,6 @@
import org.apache.hadoop.hbase.master.assignment.TransitRegionStateProcedure;
import org.apache.hadoop.hbase.master.assignment.UnassignProcedure;
import org.apache.hadoop.hbase.master.balancer.BalancerChore;
import org.apache.hadoop.hbase.master.balancer.BaseLoadBalancer;
import org.apache.hadoop.hbase.master.balancer.ClusterStatusChore;
import org.apache.hadoop.hbase.master.balancer.LoadBalancerFactory;
import org.apache.hadoop.hbase.master.cleaner.DirScanPool;
Expand Down Expand Up @@ -186,6 +184,7 @@
import org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner;
import org.apache.hadoop.hbase.replication.master.ReplicationPeerConfigUpgrader;
import org.apache.hadoop.hbase.replication.regionserver.ReplicationStatus;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfoManager;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.SecurityConstants;
Expand Down Expand Up @@ -383,7 +382,7 @@ public void run() {

private final LockManager lockManager = new LockManager(this);

private LoadBalancer balancer;
private RSGroupBasedLoadBalancer balancer;
private RegionNormalizer normalizer;
private BalancerChore balancerChore;
private RegionNormalizerChore normalizerChore;
Expand Down Expand Up @@ -438,9 +437,6 @@ public void run() {
private long splitPlanCount;
private long mergePlanCount;

/* Handle favored nodes information */
private FavoredNodesManager favoredNodesManager;

/** jetty server for master to redirect requests to regionserver infoServer */
private Server masterJettyServer;

Expand Down Expand Up @@ -765,7 +761,8 @@ public MetricsMaster getMasterMetrics() {
@VisibleForTesting
protected void initializeZKBasedSystemTrackers()
throws IOException, InterruptedException, KeeperException, ReplicationException {
this.balancer = LoadBalancerFactory.getLoadBalancer(conf);
this.balancer = new RSGroupBasedLoadBalancer();
this.balancer.setConf(conf);
this.normalizer = RegionNormalizerFactory.getRegionNormalizer(conf);
this.normalizer.setMasterServices(this);
this.normalizer.setMasterRpcServices((MasterRpcServices)rpcServices);
Expand Down Expand Up @@ -1049,9 +1046,6 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
return temp;
});
}
if (this.balancer instanceof FavoredNodesPromoter) {
favoredNodesManager = new FavoredNodesManager(this);
}

// initialize load balancer
this.balancer.setMasterServices(this);
Expand Down Expand Up @@ -1101,11 +1095,11 @@ private void finishActiveMasterInitialization(MonitoredTask status) throws IOExc
// table states messing up master launch (namespace table, etc., are not assigned).
this.assignmentManager.processOfflineRegions();
// Initialize after meta is up as below scans meta
if (favoredNodesManager != null && !maintenanceMode) {
if (getFavoredNodesManager() != null && !maintenanceMode) {
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(getConnection());
snapshotOfRegionAssignment.initialize();
favoredNodesManager.initialize(snapshotOfRegionAssignment);
getFavoredNodesManager().initialize(snapshotOfRegionAssignment);
}

// set cluster status again after user regions are assigned
Expand Down Expand Up @@ -2042,14 +2036,13 @@ public void move(final byte[] encodedRegionName, byte[] destServerName) throws I
LOG.debug("Unable to determine a plan to assign " + hri);
return;
}
// TODO: What is this? I don't get it.
if (dest.equals(serverName) && balancer instanceof BaseLoadBalancer
&& !((BaseLoadBalancer)balancer).shouldBeOnMaster(hri)) {
// TODO: deal with table on master for rs group.
if (dest.equals(serverName)) {
// To avoid unnecessary region moving later by balancer. Don't put user
// regions on master.
LOG.debug("Skipping move of region " + hri.getRegionNameAsString()
+ " to avoid unnecessary region moving later by load balancer,"
+ " because it should not be on master");
LOG.debug("Skipping move of region " + hri.getRegionNameAsString() +
" to avoid unnecessary region moving later by load balancer," +
" because it should not be on master");
return;
}
}
Expand Down Expand Up @@ -3483,12 +3476,14 @@ public boolean isSplitOrMergeEnabled(MasterSwitchType switchType) {

/**
 * Fetch the configured {@link LoadBalancer} class name. If none is set, a default is returned.
 * <p/>
 * Notice that, the base load balancer will always be {@link RSGroupBasedLoadBalancer} now, so
 * this method will return the balancer used inside each rs group.
 * @return The name of the {@link LoadBalancer} in use.
 */
public String getLoadBalancerClassName() {
  return conf.get(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    LoadBalancerFactory.getDefaultLoadBalancerClass().getName());
}

/**
Expand All @@ -3503,13 +3498,13 @@ public SplitOrMergeTracker getSplitOrMergeTracker() {
}

@Override
public LoadBalancer getLoadBalancer() {
public RSGroupBasedLoadBalancer getLoadBalancer() {
return balancer;
}

/**
 * Returns the favored nodes manager, now obtained from the RSGroup-based balancer instead of a
 * master-owned field. May be null — callers (e.g. master initialization) null-check the result.
 */
@Override
public FavoredNodesManager getFavoredNodesManager() {
  return balancer.getFavoredNodesManager();
}

private long executePeerProcedure(AbstractPeerProcedure<?> procedure) throws IOException {
Expand Down Expand Up @@ -3795,7 +3790,7 @@ public HbckChore getHbckChore() {
}

@Override
public RSGroupInfoManager getRSRSGroupInfoManager() {
public RSGroupInfoManager getRSGroupInfoManager() {
return rsGroupInfoManager;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,15 @@ public interface LoadBalancer extends Configurable, Stoppable, ConfigurationObse
// We deliberately use 'localhost' so the operation will fail fast
ServerName BOGUS_SERVER_NAME = ServerName.valueOf("localhost,1,1");

/**
* Config for pluggable load balancers.
* @deprecated since 3.0.0, will be removed in 4.0.0. In the new implementation, as the base load
* balancer will always be the rs group based one, you should just use
* {@link org.apache.hadoop.hbase.HConstants#HBASE_MASTER_LOADBALANCER_CLASS} to
* config the per group load balancer.
*/
@Deprecated
String HBASE_RSGROUP_LOADBALANCER_CLASS = "hbase.rsgroup.grouploadbalancer.class";
/**
* Set the current cluster status. This allows a LoadBalancer to map host name to a server
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,7 @@
import org.apache.hadoop.hbase.master.procedure.MasterProcedureUtil.NonceProcedureRunnable;
import org.apache.hadoop.hbase.master.procedure.ServerCrashProcedure;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.net.Address;
import org.apache.hadoop.hbase.procedure.MasterProcedureManager;
import org.apache.hadoop.hbase.procedure2.LockType;
import org.apache.hadoop.hbase.procedure2.LockedResource;
Expand Down Expand Up @@ -219,10 +220,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest;
Expand Down Expand Up @@ -275,10 +274,8 @@
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos
.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest;
Expand Down Expand Up @@ -2380,12 +2377,18 @@ public ClearDeadServersResponse clearDeadServers(RpcController controller,
LOG.debug("Some dead server is still under processing, won't clear the dead server list");
response.addAllServerName(request.getServerNameList());
} else {
DeadServer deadServer = master.getServerManager().getDeadServers();
Set<Address> clearedServers = new HashSet<>();
for (HBaseProtos.ServerName pbServer : request.getServerNameList()) {
if (!master.getServerManager().getDeadServers()
.removeDeadServer(ProtobufUtil.toServerName(pbServer))) {
ServerName server = ProtobufUtil.toServerName(pbServer);
if (!deadServer.removeDeadServer(server)) {
response.addServerName(pbServer);
} else {
clearedServers.add(server.getAddress());
}
}
master.getRSGroupInfoManager().removeServers(clearedServers);
LOG.info("Remove decommissioned servers {} from RSGroup done", clearedServers);
}

if (master.cpHost != null) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -539,5 +539,5 @@ default SplitWALManager getSplitWALManager(){
/**
* @return the {@link RSGroupInfoManager}
*/
RSGroupInfoManager getRSRSGroupInfoManager();
RSGroupInfoManager getRSGroupInfoManager();
}
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,7 @@
import org.apache.hadoop.hbase.procedure2.ProcedureInMemoryChore;
import org.apache.hadoop.hbase.procedure2.util.StringUtils;
import org.apache.hadoop.hbase.regionserver.SequenceId;
import org.apache.hadoop.hbase.rsgroup.RSGroupBasedLoadBalancer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
Expand Down Expand Up @@ -324,6 +325,11 @@ private LoadBalancer getBalancer() {
return master.getLoadBalancer();
}

private FavoredNodesPromoter getFavoredNodePromoter() {
return (FavoredNodesPromoter) ((RSGroupBasedLoadBalancer) master.getLoadBalancer())
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No need to cast to RSGroupBasedLoadBalancer?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The return value for the MasterServices interface is still LoadBalancer so...

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

For 3.0, I thought we can change this in the future?

.getInternalBalancer();
}

/** Returns the procedure environment of the master's procedure executor. */
private MasterProcedureEnv getProcedureEnvironment() {
  return master.getMasterProcedureExecutor().getEnvironment();
}
Expand Down Expand Up @@ -367,7 +373,7 @@ public RegionStateStore getRegionStateStore() {

public List<ServerName> getFavoredNodes(final RegionInfo regionInfo) {
return this.shouldAssignRegionsWithFavoredNodes
? ((FavoredStochasticBalancer) getBalancer()).getFavoredNodes(regionInfo)
? getFavoredNodePromoter().getFavoredNodes(regionInfo)
: ServerName.EMPTY_SERVER_LIST;
}

Expand Down Expand Up @@ -1773,8 +1779,8 @@ public void markRegionAsSplit(final RegionInfo parent, final ServerName serverNa
regionStateStore.splitRegion(parent, daughterA, daughterB, serverName);
if (shouldAssignFavoredNodes(parent)) {
List<ServerName> onlineServers = this.master.getServerManager().getOnlineServersList();
((FavoredNodesPromoter)getBalancer()).
generateFavoredNodesForDaughter(onlineServers, parent, daughterA, daughterB);
getFavoredNodePromoter().generateFavoredNodesForDaughter(onlineServers, parent, daughterA,
daughterB);
}
}

Expand All @@ -1799,8 +1805,7 @@ public void markRegionAsMerged(final RegionInfo child, final ServerName serverNa
}
regionStateStore.mergeRegions(child, mergeParents, serverName);
if (shouldAssignFavoredNodes(child)) {
((FavoredNodesPromoter)getBalancer()).
generateFavoredNodesForMergedRegion(child, mergeParents);
getFavoredNodePromoter().generateFavoredNodesForMergedRegion(child, mergeParents);
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -473,6 +473,7 @@ private List<ServerName> getOnlineFavoredNodes(List<ServerName> onlineServers,
}
}

/**
 * Returns the favored nodes recorded for the given region.
 * <p/>
 * Delegates to the favored nodes manager ({@code fnm}).
 */
@Override
public synchronized List<ServerName> getFavoredNodes(RegionInfo regionInfo) {
  return this.fnm.getFavoredNodes(regionInfo);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,11 @@
*/
package org.apache.hadoop.hbase.master.balancer;

import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.yetus.audience.InterfaceAudience;

/**
* The class that creates a load balancer from a conf.
Expand All @@ -30,8 +30,7 @@
public class LoadBalancerFactory {

/**
* The default {@link LoadBalancer} class.
*
* The default {@link LoadBalancer} class.
* @return The Class for the default {@link LoadBalancer}.
*/
public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {
Expand All @@ -40,16 +39,15 @@ public static Class<? extends LoadBalancer> getDefaultLoadBalancerClass() {

/**
 * Create a load balancer from the given conf.
 * @param conf supplies the balancer class name and is passed to the new instance via
 *          {@code setConf}
 * @return A {@link LoadBalancer}
 */
public static LoadBalancer getLoadBalancer(Configuration conf) {
  // Pick the configured implementation, falling back to the default class.
  Class<? extends LoadBalancer> balancerKlass =
    conf.getClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, getDefaultLoadBalancerClass(),
      LoadBalancer.class);
  LoadBalancer balancer = ReflectionUtils.newInstance(balancerKlass);
  balancer.setConf(conf);
  return balancer;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
package org.apache.hadoop.hbase.master.procedure;

import java.io.IOException;
import java.util.function.Supplier;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.master.MasterFileSystem;
Expand All @@ -28,6 +29,7 @@
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;

import org.apache.hadoop.hbase.procedure2.StateMachineProcedure;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;
import org.apache.hadoop.hbase.util.FSUtils;

/**
Expand Down Expand Up @@ -122,4 +124,13 @@ public static void createDirectory(MasterFileSystem mfs, NamespaceDescriptor nsD
protected void releaseSyncLatch() {
ProcedurePrepareLatch.releaseLatch(syncLatch, this);
}

/**
 * Verifies the rs group configured on the given namespace: the group must exist and, per the
 * helper's name, contain at least one server.
 * @param env used to reach the {@code RSGroupInfoManager} through the master services
 * @param nd the namespace descriptor whose rs group setting is checked
 * @throws IOException presumably when the referenced group is missing or empty — raised by the
 *           {@code checkGroupExists}/{@code checkGroupNotEmpty} helpers
 */
protected final void checkNamespaceRSGroup(MasterProcedureEnv env, NamespaceDescriptor nd)
  throws IOException {
  // Lazily-built description used in error messages by both checks.
  Supplier<String> forWhom = () -> "namespace " + nd.getName();
  RSGroupInfo rsGroupInfo = MasterProcedureUtil.checkGroupExists(
    env.getMasterServices().getRSGroupInfoManager()::getRSGroup,
    MasterProcedureUtil.getNamespaceGroup(nd), forWhom);
  MasterProcedureUtil.checkGroupNotEmpty(rsGroupInfo, forWhom);
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -188,6 +188,7 @@ private boolean prepareCreate(final MasterProcedureEnv env) throws IOException {
return false;
}
getTableNamespaceManager(env).validateTableAndRegionCount(nsDescriptor);
checkNamespaceRSGroup(env, nsDescriptor);
return true;
}

Expand Down
Loading