Use of org.apache.hadoop.hbase.master.ServerManager in project hbase by apache.
From the class EnableTableProcedure, method markRegionsOnline:
/**
 * Mark the offline regions of the table online.
 * @param env MasterProcedureEnv
 * @param tableName the target table
 * @return true if the operation completed fully; false if it was interrupted
 * @throws IOException
 */
private static boolean markRegionsOnline(final MasterProcedureEnv env, final TableName tableName)
    throws IOException {
  final AssignmentManager assignmentManager = env.getMasterServices().getAssignmentManager();
  final MasterServices masterServices = env.getMasterServices();
  final ServerManager serverManager = masterServices.getServerManager();
  boolean done = false;
  // Get the regions of this table. We're done when all listed regions are online.
  List<Pair<HRegionInfo, ServerName>> tableRegionsAndLocations;
  if (TableName.META_TABLE_NAME.equals(tableName)) {
    tableRegionsAndLocations =
        new MetaTableLocator().getMetaRegionsAndLocations(masterServices.getZooKeeper());
  } else {
    tableRegionsAndLocations =
        MetaTableAccessor.getTableRegionsAndLocations(masterServices.getConnection(), tableName);
  }
  int countOfRegionsInTable = tableRegionsAndLocations.size();
  Map<HRegionInfo, ServerName> regionsToAssign =
      regionsToAssignWithServerName(env, tableRegionsAndLocations);
  // We may need to create regions for replicas that are not yet recorded in meta.
  List<HRegionInfo> unrecordedReplicas = AssignmentManager.replicaRegionsNotRecordedInMeta(
      new HashSet<>(regionsToAssign.keySet()), masterServices);
  Map<ServerName, List<HRegionInfo>> srvToUnassignedRegs = assignmentManager.getBalancer()
      .roundRobinAssignment(unrecordedReplicas, serverManager.getOnlineServersList());
  if (srvToUnassignedRegs != null) {
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : srvToUnassignedRegs.entrySet()) {
      for (HRegionInfo h : entry.getValue()) {
        regionsToAssign.put(h, entry.getKey());
      }
    }
  }
  int offlineRegionsCount = regionsToAssign.size();
  LOG.info("Table '" + tableName + "' has " + countOfRegionsInTable + " regions, of which "
      + offlineRegionsCount + " are offline.");
  if (offlineRegionsCount == 0) {
    return true;
  }
  List<ServerName> onlineServers = serverManager.createDestinationServersList();
  Map<ServerName, List<HRegionInfo>> bulkPlan = env.getMasterServices().getAssignmentManager()
      .getBalancer().retainAssignment(regionsToAssign, onlineServers);
  if (bulkPlan != null) {
    LOG.info("Bulk assigning " + offlineRegionsCount + " region(s) across " + bulkPlan.size()
        + " server(s), retainAssignment=true");
    BulkAssigner ba = new GeneralBulkAssigner(masterServices, bulkPlan, assignmentManager, true);
    try {
      if (ba.bulkAssign()) {
        done = true;
      }
    } catch (InterruptedException e) {
      LOG.warn("Enable operation was interrupted when enabling table '" + tableName + "'");
      // Preserve the interrupt.
      Thread.currentThread().interrupt();
    }
  } else {
    LOG.info("Balancer was unable to find suitable servers for table " + tableName
        + ", leaving unassigned");
  }
  return done;
}
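The core of the pattern above is that ServerManager supplies the candidate server list that the balancer assigns regions onto. The following is a minimal sketch, not taken from the HBase source: the class and helper names (RoundRobinSketch, planRoundRobin) are hypothetical, and it reuses only the ServerManager and balancer calls that appear in the snippet above.

import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.LoadBalancer;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;

public final class RoundRobinSketch {

  // Hypothetical helper: spread the given regions across the currently online
  // region servers, mirroring the roundRobinAssignment call used above.
  static Map<ServerName, List<HRegionInfo>> planRoundRobin(MasterServices master,
      List<HRegionInfo> regions) throws IOException {
    ServerManager serverManager = master.getServerManager();
    LoadBalancer balancer = master.getAssignmentManager().getBalancer();
    // ServerManager is the source of truth for which servers can receive regions.
    List<ServerName> onlineServers = serverManager.getOnlineServersList();
    return balancer.roundRobinAssignment(regions, onlineServers);
  }

  private RoundRobinSketch() {
  }
}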
Use of org.apache.hadoop.hbase.master.ServerManager in project hbase by apache.
From the class MergeTableRegionsProcedure, method getRegionLoad:
private RegionLoad getRegionLoad(final MasterProcedureEnv env, final ServerName sn,
    final HRegionInfo hri) {
  ServerManager serverManager = env.getMasterServices().getServerManager();
  ServerLoad load = serverManager.getLoad(sn);
  if (load != null) {
    Map<byte[], RegionLoad> regionsLoad = load.getRegionsLoad();
    if (regionsLoad != null) {
      return regionsLoad.get(hri.getRegionName());
    }
  }
  return null;
}
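ServerManager.getLoad(sn) returns the last ServerLoad report from that server, which carries per-region metrics. A minimal sketch of consuming those metrics follows; it is not from the HBase source, the class and helper names are hypothetical, and it assumes RegionLoad.getStorefileSizeMB() is available in this HBase version (it is hedged in a comment below).

import java.util.Map;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;

public final class RegionLoadSketch {

  // Hypothetical helper: report the store file size (in MB) of a region as last
  // reported by its hosting server, or -1 if no report is available yet.
  static long storefileSizeMb(ServerManager serverManager, ServerName sn, HRegionInfo hri) {
    ServerLoad serverLoad = serverManager.getLoad(sn);
    if (serverLoad == null) {
      return -1; // the server has not reported in (or is not online)
    }
    Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
    RegionLoad regionLoad = regionsLoad == null ? null : regionsLoad.get(hri.getRegionName());
    // Assumption: RegionLoad exposes getStorefileSizeMB() in this HBase version.
    return regionLoad == null ? -1 : regionLoad.getStorefileSizeMB();
  }

  private RegionLoadSketch() {
  }
}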
Use of org.apache.hadoop.hbase.master.ServerManager in project hbase by apache.
From the class ProcedureSyncWait, method waitRegionServers:
protected static void waitRegionServers(final MasterProcedureEnv env) throws IOException {
  final ServerManager sm = env.getMasterServices().getServerManager();
  ProcedureSyncWait.waitFor(env, "server to assign region(s)",
      new ProcedureSyncWait.Predicate<Boolean>() {
        @Override
        public Boolean evaluate() throws IOException {
          List<ServerName> servers = sm.createDestinationServersList();
          return servers != null && !servers.isEmpty();
        }
      });
}
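The same ProcedureSyncWait.Predicate shape can express other ServerManager-based wait conditions. Below is a hedged variant, not from the HBase source: the class name, method name, and the minServers threshold are hypothetical, and it only reuses waitFor and createDestinationServersList as shown above.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.ServerManager;
import org.apache.hadoop.hbase.master.procedure.MasterProcedureEnv;
import org.apache.hadoop.hbase.master.procedure.ProcedureSyncWait;

public final class WaitForServersSketch {

  // Hypothetical variant: block until at least minServers region servers can accept
  // regions, re-evaluating the ServerManager view on every poll.
  static void waitForMinServers(final MasterProcedureEnv env, final int minServers)
      throws IOException {
    final ServerManager sm = env.getMasterServices().getServerManager();
    ProcedureSyncWait.waitFor(env, "at least " + minServers + " server(s) to accept regions",
        new ProcedureSyncWait.Predicate<Boolean>() {
          @Override
          public Boolean evaluate() throws IOException {
            List<ServerName> servers = sm.createDestinationServersList();
            return servers != null && servers.size() >= minServers;
          }
        });
  }

  private WaitForServersSketch() {
  }
}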
Use of org.apache.hadoop.hbase.master.ServerManager in project hbase by apache.
From the class TestBaseLoadBalancer, method testRandomAssignment:
private void testRandomAssignment(int numberOfIdleServers) throws Exception {
  assert numberOfIdleServers > 0;
  List<ServerName> idleServers = new ArrayList<>(numberOfIdleServers);
  for (int i = 0; i != numberOfIdleServers; ++i) {
    idleServers.add(ServerName.valueOf("server-" + i, 1000, 1L));
  }
  List<ServerName> allServers = new ArrayList<>(idleServers.size() + 1);
  allServers.add(ServerName.valueOf("server-" + numberOfIdleServers, 1000, 1L));
  allServers.addAll(idleServers);
  LoadBalancer balancer = new MockBalancer() {
    @Override
    public boolean shouldBeOnMaster(HRegionInfo region) {
      return false;
    }
  };
  Configuration conf = HBaseConfiguration.create();
  conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class);
  balancer.setConf(conf);
  ServerManager sm = Mockito.mock(ServerManager.class);
  Mockito.when(sm.getOnlineServersListWithPredicator(allServers,
      BaseLoadBalancer.IDLE_SERVER_PREDICATOR)).thenReturn(idleServers);
  MasterServices services = Mockito.mock(MasterServices.class);
  Mockito.when(services.getServerManager()).thenReturn(sm);
  balancer.setMasterServices(services);
  HRegionInfo hri1 = new HRegionInfo(TableName.valueOf(name.getMethodName()),
      "key1".getBytes(), "key2".getBytes(), false, 100);
  assertNull(balancer.randomAssignment(hri1, Collections.EMPTY_LIST));
  assertNull(balancer.randomAssignment(hri1, null));
  for (int i = 0; i != 3; ++i) {
    ServerName sn = balancer.randomAssignment(hri1, allServers);
    assertTrue("actual: " + sn + ", expected one of: " + idleServers, idleServers.contains(sn));
  }
}
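The test works because the balancer only sees ServerManager through the MasterServices interface, so both can be mocked. A compact sketch of that fixture is below; it is not from the HBase source, the class and helper names are hypothetical, and it uses only the Mockito stubbing and ServerManager/MasterServices getters shown in this section.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.ServerManager;
import org.mockito.Mockito;

public final class MockServerManagerSketch {

  // Hypothetical test fixture: a MasterServices mock whose ServerManager reports the
  // given servers as online, for exercising balancer code without a running cluster.
  static MasterServices mockMasterWithServers(ServerName... servers) {
    List<ServerName> online = Arrays.asList(servers);
    ServerManager sm = Mockito.mock(ServerManager.class);
    Mockito.when(sm.getOnlineServersList()).thenReturn(online);
    MasterServices services = Mockito.mock(MasterServices.class);
    Mockito.when(services.getServerManager()).thenReturn(sm);
    return services;
  }

  private MockServerManagerSketch() {
  }
}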
Use of org.apache.hadoop.hbase.master.ServerManager in project hbase by apache.
From the class RSGroupAdminServer, method balanceRSGroup:
@Override
public boolean balanceRSGroup(String groupName) throws IOException {
  ServerManager serverManager = master.getServerManager();
  AssignmentManager assignmentManager = master.getAssignmentManager();
  LoadBalancer balancer = master.getLoadBalancer();
  boolean balancerRan;
  synchronized (balancer) {
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().preBalanceRSGroup(groupName);
    }
    if (getRSGroupInfo(groupName) == null) {
      throw new ConstraintException("RSGroup does not exist: " + groupName);
    }
    // Only allow one balance run at a time.
    Map<String, RegionState> groupRIT = rsGroupGetRegionsInTransition(groupName);
    if (groupRIT.size() > 0) {
      LOG.debug("Not running balancer because " + groupRIT.size() + " region(s) in transition: "
          + StringUtils.abbreviate(
              master.getAssignmentManager().getRegionStates().getRegionsInTransition().toString(),
              256));
      return false;
    }
    if (serverManager.areDeadServersInProgress()) {
      LOG.debug("Not running balancer because processing dead regionserver(s): "
          + serverManager.getDeadServers());
      return false;
    }
    // We balance per group instead of per table.
    List<RegionPlan> plans = new ArrayList<>();
    for (Map.Entry<TableName, Map<ServerName, List<HRegionInfo>>> tableMap :
        getRSGroupAssignmentsByTable(groupName).entrySet()) {
      LOG.info("Creating partial plan for table " + tableMap.getKey() + ": " + tableMap.getValue());
      List<RegionPlan> partialPlans = balancer.balanceCluster(tableMap.getValue());
      LOG.info("Partial plan for table " + tableMap.getKey() + ": " + partialPlans);
      if (partialPlans != null) {
        plans.addAll(partialPlans);
      }
    }
    long startTime = System.currentTimeMillis();
    balancerRan = plans != null;
    if (plans != null && !plans.isEmpty()) {
      LOG.info("RSGroup balance " + groupName + " starting with plan count: " + plans.size());
      for (RegionPlan plan : plans) {
        LOG.info("balance " + plan);
        assignmentManager.balance(plan);
      }
      LOG.info("RSGroup balance " + groupName + " completed after "
          + (System.currentTimeMillis() - startTime) + " ms");
    }
    if (master.getMasterCoprocessorHost() != null) {
      master.getMasterCoprocessorHost().postBalanceRSGroup(groupName, balancerRan);
    }
  }
  return balancerRan;
}