Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
In class TestRegionHDFSBlockLocationFinder, method testRefreshAndWait:
@Test
public void testRefreshAndWait() throws Exception {
  finder.getCache().invalidateAll();
  for (RegionInfo region : REGIONS) {
    assertNull(finder.getCache().getIfPresent(region));
  }
  finder.refreshAndWait(REGIONS);
  for (RegionInfo region : REGIONS) {
    assertNotNull(finder.getCache().getIfPresent(region));
  }
}
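RegionHDFSBlockLocationFinder keeps a per-region cache of HDFS block distributions, and the test checks that invalidation empties it and refreshAndWait repopulates it for every RegionInfo key. The sketch below illustrates that Guava/Caffeine-style loading-cache pattern in isolation; it uses the plain Caffeine library and a hypothetical load(...) helper, and is not the finder's actual implementation (HBase ships a shaded Caffeine via hbase-thirdparty).

// Minimal sketch (assumptions: plain Caffeine on the classpath; load(...) is ours).
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.client.RegionInfo;

public class RegionDistributionCacheSketch {
  // Hypothetical loader: the real finder would compute this from HDFS block locations.
  private HDFSBlocksDistribution load(RegionInfo region) {
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    hbd.addHostsAndBlockWeight(new String[] { "host-0" }, 100L);
    return hbd;
  }

  private final LoadingCache<RegionInfo, HDFSBlocksDistribution> cache =
    Caffeine.newBuilder().build(this::load);

  public void demo(RegionInfo region) {
    cache.invalidateAll();                          // cache is now empty
    assert cache.getIfPresent(region) == null;      // nothing cached yet, as the first loop asserts
    HDFSBlocksDistribution hbd = cache.get(region); // loads via load(...) and caches the result
    assert cache.getIfPresent(region) != null;      // present afterwards, as the second loop asserts
  }
}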
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
In class TestRegionHDFSBlockLocationFinder, method setUpBeforeClass:
@BeforeClass
public static void setUpBeforeClass() {
  TD = TableDescriptorBuilder.newBuilder(TableName.valueOf("RegionLocationFinder")).build();
  int numRegions = 100;
  REGIONS = new ArrayList<>(numRegions);
  for (int i = 1; i <= numRegions; i++) {
    byte[] startKey = i == 0 ? HConstants.EMPTY_START_ROW : Bytes.toBytes(i);
    byte[] endKey = i == numRegions ? HConstants.EMPTY_BYTE_ARRAY : Bytes.toBytes(i + 1);
    RegionInfo region = RegionInfoBuilder.newBuilder(TD.getTableName()).setStartKey(startKey)
      .setEndKey(endKey).build();
    REGIONS.add(region);
  }
}
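RegionInfoBuilder only needs the table name plus start and end keys, and the loop above tiles the key space: each region's end key is the next region's start key, and the last region gets an empty end key. (Note that because the loop starts at i = 1, the i == 0 branch never fires, so the first region's start key is Bytes.toBytes(1) rather than EMPTY_START_ROW.) A small standalone check of the contiguity property, using only public RegionInfo accessors (verifyContiguous is our helper, not an HBase API):

// Sketch: confirm a sorted list of regions covers a contiguous key space.
import java.util.List;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public final class RegionKeySpaceCheck {
  private RegionKeySpaceCheck() {
  }

  // True when every region's end key equals the next region's start key.
  static boolean verifyContiguous(List<RegionInfo> regions) {
    for (int i = 0; i + 1 < regions.size(); i++) {
      byte[] end = regions.get(i).getEndKey();
      byte[] nextStart = regions.get(i + 1).getStartKey();
      if (!Bytes.equals(end, nextStart)) {
        return false;
      }
    }
    return true;
  }
}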
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
In class TestRegionHDFSBlockLocationFinder, method testGetTopBlockLocations:
@Test
public void testGetTopBlockLocations() {
  Map<ServerName, ServerMetrics> serverMetrics = new HashMap<>();
  for (int i = 0; i < 10; i++) {
    ServerName sn = ServerName.valueOf("host-" + i, 12345, 12345);
    serverMetrics.put(sn, null);
  }
  ClusterMetrics metrics = mock(ClusterMetrics.class);
  when(metrics.getLiveServerMetrics()).thenReturn(serverMetrics);
  finder.setClusterMetrics(metrics);
  for (RegionInfo region : REGIONS) {
    List<ServerName> servers = finder.getTopBlockLocations(region);
    long previousWeight = Long.MAX_VALUE;
    HDFSBlocksDistribution hbd = generate(region);
    for (ServerName server : servers) {
      long weight = hbd.getWeight(server.getHostname());
      assertTrue(weight <= previousWeight);
      previousWeight = weight;
    }
  }
}
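getTopBlockLocations(region) returns servers ordered by how much of the region's HDFS data they hold, which is why the test walks the returned list and asserts the per-host weights never increase. Those weights come from HDFSBlocksDistribution; the short example below demonstrates its bookkeeping (host names and sizes are arbitrary):

// Sketch of HDFSBlocksDistribution accounting: each addHostsAndBlockWeight call
// credits the block's size to every host holding a replica; getWeight(host)
// returns the accumulated total for that host.
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public final class BlockWeightExample {
  public static void main(String[] args) {
    HDFSBlocksDistribution hbd = new HDFSBlocksDistribution();
    // Block of 100 bytes replicated on host-0 and host-1.
    hbd.addHostsAndBlockWeight(new String[] { "host-0", "host-1" }, 100L);
    // Block of 50 bytes replicated on host-0 only.
    hbd.addHostsAndBlockWeight(new String[] { "host-0" }, 50L);

    System.out.println(hbd.getWeight("host-0")); // 150
    System.out.println(hbd.getWeight("host-1")); // 100
    System.out.println(hbd.getWeight("host-9")); // 0, no blocks recorded for that host
  }
}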
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
In class TestSimpleLoadBalancer, method testBalanceClusterOverallStrictly:
@Test
public void testBalanceClusterOverallStrictly() throws Exception {
  int[] regionNumOfTable1PerServer = { 3, 3, 4, 4, 4, 4, 5, 5, 5 };
  int[] regionNumOfTable2PerServer = { 2, 2, 2, 2, 2, 2, 2, 2, 1 };
  TreeMap<ServerName, List<RegionInfo>> serverRegionInfo = new TreeMap<>();
  List<ServerAndLoad> serverAndLoads = new ArrayList<>();
  for (int i = 0; i < regionNumOfTable1PerServer.length; i++) {
    ServerName serverName = ServerName.valueOf("server" + i, 1000, -1);
    List<RegionInfo> regions1 = createRegions(regionNumOfTable1PerServer[i], TableName.valueOf("table1"));
    List<RegionInfo> regions2 = createRegions(regionNumOfTable2PerServer[i], TableName.valueOf("table2"));
    regions1.addAll(regions2);
    serverRegionInfo.put(serverName, regions1);
    ServerAndLoad serverAndLoad =
      new ServerAndLoad(serverName, regionNumOfTable1PerServer[i] + regionNumOfTable2PerServer[i]);
    serverAndLoads.add(serverAndLoad);
  }
  HashMap<TableName, TreeMap<ServerName, List<RegionInfo>>> LoadOfAllTable =
    mockClusterServersWithTables(serverRegionInfo);
  loadBalancer.setClusterLoad((Map) LoadOfAllTable);
  List<RegionPlan> partialplans =
    loadBalancer.balanceTable(TableName.valueOf("table1"), LoadOfAllTable.get(TableName.valueOf("table1")));
  List<ServerAndLoad> balancedServerLoads = reconcile(serverAndLoads, partialplans, serverRegionInfo);
  for (ServerAndLoad serverAndLoad : balancedServerLoads) {
    assertEquals(6, serverAndLoad.getLoad());
  }
}
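balanceTable only returns RegionPlans (region, source server, destination server); the test's reconcile helper applies them to the starting loads before asserting that every server ends at 6 regions (37 table1 regions plus 17 table2 regions spread over 9 servers). The sketch below shows the idea of applying such plans to a per-server load map; applyPlans is our illustration, not the test's reconcile implementation:

// Sketch: apply RegionPlans to per-server region counts, then the totals can be
// checked for balance.
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.master.RegionPlan;

public final class ApplyPlansSketch {
  static void applyPlans(Map<ServerName, Integer> loads, List<RegionPlan> plans) {
    for (RegionPlan plan : plans) {
      loads.merge(plan.getSource(), -1, Integer::sum);     // one region leaves the source
      loads.merge(plan.getDestination(), 1, Integer::sum); // and lands on the destination
    }
    // With 54 regions over 9 servers, a strictly balanced overall load leaves
    // 54 / 9 = 6 regions per server, matching the assertEquals(6, ...) above.
  }
}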
Use of org.apache.hadoop.hbase.client.RegionInfo in project hbase by apache.
In class TestFavoredNodeAssignmentHelper, method testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack:
@Test
public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() {
  // Test the case where there is only one server in one rack and another rack
  // has more servers. We try to choose secondary/tertiary on different
  // racks than what the primary is on. But if the other rack doesn't have
  // enough nodes to have both secondary/tertiary RSs, the tertiary is placed
  // on the same rack as the primary server is on.
  Map<String, Integer> rackToServerCount = new HashMap<>();
  rackToServerCount.put("rack1", 2);
  rackToServerCount.put("rack2", 1);
  Triple<Map<RegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<RegionInfo>> primaryRSMapAndHelper =
    secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount);
  FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond();
  Map<RegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst();
  List<RegionInfo> regions = primaryRSMapAndHelper.getThird();
  assertTrue(primaryRSMap.size() == 6);
  Map<RegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap);
  assertTrue(secondaryAndTertiaryMap.size() == regions.size());
  for (RegionInfo region : regions) {
    ServerName s = primaryRSMap.get(region);
    ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0];
    ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1];
    Set<String> racks = Sets.newHashSet(rackManager.getRack(s));
    racks.add(rackManager.getRack(secondaryRS));
    racks.add(rackManager.getRack(tertiaryRS));
    assertTrue(racks.size() >= 2);
  }
}
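The final assertion encodes the favored-nodes invariant: the primary, secondary, and tertiary region servers for each region must span at least two racks, so a single rack failure cannot take out all three favored nodes at once. A self-contained version of that check, using a plain Map as a stand-in for HBase's RackManager (the helper and its name are ours, purely for illustration):

// Sketch: verify that a region's three favored nodes do not all share one rack.
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.hbase.ServerName;

public final class RackSpreadCheck {
  static boolean spansAtLeastTwoRacks(Map<ServerName, String> rackOf, ServerName primary,
    ServerName secondary, ServerName tertiary) {
    Set<String> racks = new HashSet<>();
    racks.add(rackOf.get(primary));
    racks.add(rackOf.get(secondary));
    racks.add(rackOf.get(tertiary));
    // At least two distinct racks means no single rack holds the entire favored set.
    return racks.size() >= 2;
  }
}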