Usage of org.apache.hadoop.hbase.master.RegionPlan in the Apache HBase project:
class MoveRegionProcedure, method deserializeStateData.
@Override
protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
  super.deserializeStateData(serializer);
  final MoveRegionStateData state = serializer.deserialize(MoveRegionStateData.class);
  // The region itself was already restored by the superclass deserialization above.
  final RegionInfo region = getRegion();
  final ServerName source = ProtobufUtil.toServerName(state.getSourceServer());
  // Destination is optional in the serialized state; leave it null when absent.
  ServerName destination = null;
  if (state.hasDestinationServer()) {
    destination = ProtobufUtil.toServerName(state.getDestinationServer());
  }
  this.plan = new RegionPlan(region, source, destination);
}
Usage of org.apache.hadoop.hbase.master.RegionPlan in the Apache HBase project:
class TestSimpleLoadBalancer, method testBalanceClusterOverallStrictly.
@Test
public void testBalanceClusterOverallStrictly() throws Exception {
  // Per-server region counts for two tables. Totals per server range 5-7; with
  // 54 regions over 9 servers, a strictly-overall-balanced cluster levels all to 6.
  int[] regionNumOfTable1PerServer = { 3, 3, 4, 4, 4, 4, 5, 5, 5 };
  int[] regionNumOfTable2PerServer = { 2, 2, 2, 2, 2, 2, 2, 2, 1 };
  TreeMap<ServerName, List<RegionInfo>> serverRegionInfo = new TreeMap<>();
  List<ServerAndLoad> serverAndLoads = new ArrayList<>();
  for (int i = 0; i < regionNumOfTable1PerServer.length; i++) {
    ServerName serverName = ServerName.valueOf("server" + i, 1000, -1);
    List<RegionInfo> regions1 = createRegions(regionNumOfTable1PerServer[i], TableName.valueOf("table1"));
    List<RegionInfo> regions2 = createRegions(regionNumOfTable2PerServer[i], TableName.valueOf("table2"));
    // Combine both tables' regions under this server.
    regions1.addAll(regions2);
    serverRegionInfo.put(serverName, regions1);
    serverAndLoads.add(new ServerAndLoad(serverName,
      regionNumOfTable1PerServer[i] + regionNumOfTable2PerServer[i]));
  }
  // Renamed from "LoadOfAllTable" to follow lowerCamelCase local-variable naming.
  HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> loadOfAllTable =
    mockClusterServersWithTables(serverRegionInfo);
  loadBalancer.setClusterLoad((Map) loadOfAllTable);
  List<RegionPlan> partialPlans =
    loadBalancer.balanceTable(TableName.valueOf("table1"), loadOfAllTable.get(TableName.valueOf("table1")));
  List<ServerAndLoad> balancedServerLoads = reconcile(serverAndLoads, partialPlans, serverRegionInfo);
  // 54 regions across 9 servers: every server must end up carrying exactly 6.
  for (ServerAndLoad serverAndLoad : balancedServerLoads) {
    assertEquals(6, serverAndLoad.getLoad());
  }
}
Usage of org.apache.hadoop.hbase.master.RegionPlan in the Apache HBase project:
class TestStochasticLoadBalancer, method testLosingRs.
@Test
public void testLosingRs() throws Exception {
  final int numNodes = 3;
  final int numRegions = 20;
  // Number of regions each server starts with (all servers except one).
  final int numRegionsPerServer = 3;
  final int replication = 1;
  final int numTables = 2;
  Map<ServerName, List<RegionInfo>> serverMap =
    createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
  List<ServerAndLoad> loads = convertToList(serverMap);
  List<RegionPlan> plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
  assertNotNull(plans);
  // Apply the plan to the mock cluster and confirm it reached balance.
  List<ServerAndLoad> balancedCluster = reconcile(loads, plans, serverMap);
  assertClusterAsBalanced(balancedCluster);
  // Fabricate a "dead" incarnation of the first server: same host/port but an
  // older start code, holding no regions.
  ServerName firstServer = serverMap.keySet().toArray(new ServerName[0])[0];
  ServerName deadSn =
    ServerName.valueOf(firstServer.getHostname(), firstServer.getPort(), firstServer.getStartcode() - 100);
  serverMap.put(deadSn, new ArrayList<>(0));
  // With the empty dead-server entry present, the balancer should produce no plan.
  plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
  assertNull(plans);
}
Usage of org.apache.hadoop.hbase.master.RegionPlan in the Apache HBase project:
class TestStochasticLoadBalancerHeterogeneousCost, method testWithCluster.
@Override
protected void testWithCluster(final Map<ServerName, List<RegionInfo>> serverMap, final RackManager rackManager, final boolean assertFullyBalanced, final boolean assertFullyBalancedForReplicas) {
  final List<ServerAndLoad> initialLoads = convertToList(serverMap);
  LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
  loadBalancer.setRackManager(rackManager);
  // Run the balancer once over the whole (ensemble) cluster.
  final List<RegionPlan> plans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
  assertNotNull(plans);
  // Nothing further to verify unless a stability assertion was requested.
  if (!assertFullyBalanced && !assertFullyBalancedForReplicas) {
    return;
  }
  // Apply the plan to the mock cluster.
  final List<ServerAndLoad> balancedCluster = reconcile(initialLoads, plans, serverMap);
  // Print out the cluster loads to make debugging easier.
  LOG.info("Mock Balanced cluster : " + printMock(balancedCluster));
  if (assertFullyBalanced) {
    // A second pass over an already-balanced cluster must yield no further moves.
    final List<RegionPlan> secondPlans = loadBalancer.balanceTable(HConstants.ENSEMBLE_TABLE_NAME, serverMap);
    assertNull(secondPlans);
    // Build the external cost function so we can read each RS's region limit.
    final HeterogeneousRegionCountCostFunction cf = new HeterogeneousRegionCountCostFunction(conf);
    assertNotNull(cf);
    final BalancerClusterState cluster = new BalancerClusterState(serverMap, null, null, null);
    cf.prepare(cluster);
    // Check that every host carries a number of regions below its limit.
    for (final ServerAndLoad serverAndLoad : balancedCluster) {
      final ServerName sn = serverAndLoad.getServerName();
      final int numberRegions = serverAndLoad.getLoad();
      final int limit = cf.findLimitForRS(sn);
      final double usage = (double) numberRegions / (double) limit;
      LOG.debug(sn.getHostname() + ":" + numberRegions + "/" + limit + "(" + (usage * 100) + "%)");
      // as the balancer is stochastic, we cannot check exactly the result of the balancing,
      // hence the allowedWindow parameter
      assertTrue("Host " + sn.getHostname() + " should be below " + cf.overallUsage * ALLOWED_WINDOW * 100 + "%; " + cf.overallUsage + ", " + usage + ", " + numberRegions + ", " + limit, usage <= cf.overallUsage * ALLOWED_WINDOW);
    }
  }
  if (assertFullyBalancedForReplicas) {
    assertRegionReplicaPlacement(serverMap, rackManager);
  }
}
Usage of org.apache.hadoop.hbase.master.RegionPlan in the Apache HBase project:
class TestSimpleLoadBalancer, method testImpactOfBalanceClusterOverall.
/**
 * Verifies that balancing each table individually also leaves the cluster
 * balanced overall, either when the balancer is fed the per-table load map
 * ({@code useLoadOfAllTable == true}) or the plain cluster load map.
 */
private void testImpactOfBalanceClusterOverall(boolean useLoadOfAllTable) throws Exception {
  Map<TableName, Map<ServerName, List<RegionInfo>>> clusterLoad = new TreeMap<>();
  Map<ServerName, List<RegionInfo>> clusterServers = mockUniformClusterServers(mockUniformCluster);
  List<ServerAndLoad> clusterList = convertToList(clusterServers);
  clusterLoad.put(TableName.valueOf(name.getMethodName()), clusterServers);
  // use overall can achieve both table and cluster level balance
  // Renamed from "LoadOfAllTable" to follow lowerCamelCase local-variable naming.
  HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> loadOfAllTable =
    mockClusterServersWithTables(clusterServers);
  if (useLoadOfAllTable) {
    loadBalancer.setClusterLoad((Map) loadOfAllTable);
  } else {
    loadBalancer.setClusterLoad(clusterLoad);
  }
  List<RegionPlan> clusterPlans = new ArrayList<>();
  for (Map.Entry<TableName, TreeMap<ServerName, List<RegionInfo>>> mapEntry : loadOfAllTable.entrySet()) {
    TableName tableName = mapEntry.getKey();
    TreeMap<ServerName, List<RegionInfo>> servers = mapEntry.getValue();
    List<ServerAndLoad> list = convertToList(servers);
    LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list));
    List<RegionPlan> partialPlans = loadBalancer.balanceTable(tableName, servers);
    // balanceTable may return null when the table needs no moves; braces added
    // around the guarded statement per always-brace convention.
    if (partialPlans != null) {
      clusterPlans.addAll(partialPlans);
    }
    List<ServerAndLoad> balancedClusterPerTable = reconcile(list, partialPlans, servers);
    LOG.info("Mock Balance : " + printMock(balancedClusterPerTable));
    assertClusterAsBalanced(balancedClusterPerTable);
    // Return the mocked regions and servers to their pools for the next table.
    for (Map.Entry<ServerName, List<RegionInfo>> entry : servers.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
  // The accumulated per-table plans must also leave the overall cluster balanced.
  List<ServerAndLoad> balancedCluster1 = reconcile(clusterList, clusterPlans, clusterServers);
  assertTrue(assertClusterOverallAsBalanced(balancedCluster1, loadOfAllTable.keySet().size()));
}
End of aggregated RegionPlan usage examples.